diff --git a/Cargo.lock b/Cargo.lock index 9c07a61c74d44..abd7deffe3521 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1672,14 +1672,14 @@ dependencies = [ "flate2", "hex", "itertools 0.12.1", + "legacy-move-compiler", "libsecp256k1", "log", - "lru 0.7.8", "merlin", "move-binary-format", "move-cli", "move-command-line-common", - "move-compiler", + "move-compiler-v2", "move-core-types", "move-docgen", "move-model", @@ -1687,6 +1687,7 @@ dependencies = [ "move-prover", "move-prover-boogie-backend", "move-prover-bytecode-pipeline", + "move-prover-lab", "move-stackless-bytecode", "move-unit-test", "move-vm-runtime", @@ -1704,8 +1705,8 @@ dependencies = [ "siphasher", "smallvec", "tempfile", - "thiserror", "tiny-keccak", + "toml 0.8.2", ] [[package]] @@ -2583,6 +2584,8 @@ version = "0.1.1" dependencies = [ "aptos-gas-schedule", "aptos-native-interface", + "aptos-types", + "bcs 0.1.6 (git+https://github.com/movementlabsxyz/bcs.git?rev=bc16d2d39cabafaabd76173dd1b04b2aa170cf0c)", "dir-diff", "file_diff", "move-cli", @@ -9527,6 +9530,28 @@ dependencies = [ "thiserror", ] +[[package]] +name = "legacy-move-compiler" +version = "0.0.1" +dependencies = [ + "anyhow", + "bcs 0.1.6 (git+https://github.com/movementlabsxyz/bcs.git?rev=bc16d2d39cabafaabd76173dd1b04b2aa170cf0c)", + "clap 4.5.13", + "codespan-reporting", + "hex", + "move-binary-format", + "move-bytecode-source-map", + "move-bytecode-verifier", + "move-command-line-common", + "move-core-types", + "move-ir-types", + "move-symbol-pool", + "once_cell", + "petgraph 0.5.1", + "regex", + "tempfile", +] + [[package]] name = "levenshtein" version = "1.0.5" @@ -10635,6 +10660,23 @@ dependencies = [ "walkdir", ] +[[package]] +name = "move-prover-lab" +version = "0.1.0" +dependencies = [ + "anyhow", + "chrono", + "clap 4.5.13", + "codespan-reporting", + "itertools 0.12.1", + "move-model", + "move-prover", + "move-prover-boogie-backend", + "move-prover-bytecode-pipeline", + "plotters", + "z3tracer", +] + [[package]] name = "move-prover-test-utils" version = "0.1.0" @@ -12847,25 +12889,6 @@ dependencies = [ "protobuf-codegen", ] -[[package]] -name = "prover-lab" -version = "0.1.0" -dependencies = [ - "anyhow", - "chrono", - "clap 4.5.13", - "codespan-reporting", - "itertools 0.12.1", - "log", - "move-compiler", - "move-model", - "move-prover", - "move-prover-boogie-backend", - "move-prover-bytecode-pipeline", - "plotters", - "z3tracer", -] - [[package]] name = "psl-types" version = "2.0.11" diff --git a/Cargo.toml b/Cargo.toml index 8c0c58c45c078..084e0a2b64c00 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -2,250 +2,251 @@ resolver = "2" members = [ - "api", - "api/openapi-spec-generator", - "api/test-context", - "api/types", - "aptos-move/aptos-abstract-gas-usage", - "aptos-move/aptos-aggregator", - "aptos-move/aptos-gas-algebra", - "aptos-move/aptos-gas-meter", - "aptos-move/aptos-gas-profiling", - "aptos-move/aptos-gas-schedule", - "aptos-move/aptos-gas-schedule-updator", - "aptos-move/aptos-memory-usage-tracker", - "aptos-move/aptos-native-interface", - "aptos-move/aptos-release-builder", - "aptos-move/aptos-resource-viewer", - "aptos-move/aptos-sdk-builder", - "aptos-move/aptos-transaction-benchmarks", - "aptos-move/aptos-transactional-test-harness", - "aptos-move/aptos-validator-interface", - "aptos-move/aptos-vm", - "aptos-move/aptos-vm-logging", - "aptos-move/aptos-vm-profiling", - "aptos-move/aptos-vm-types", - "aptos-move/block-executor", - "aptos-move/e2e-benchmark", - "aptos-move/e2e-move-tests", - "aptos-move/e2e-tests", - 
"aptos-move/e2e-testsuite", - "aptos-move/framework", - "aptos-move/framework/cached-packages", - "aptos-move/framework/table-natives", - "aptos-move/move-examples", - "aptos-move/mvhashmap", - "aptos-move/package-builder", - "aptos-move/vm-genesis", - "aptos-move/writeset-transaction-generator", - "aptos-node", - "aptos-utils", - "config", - "config/global-constants", - "consensus", - "consensus/consensus-types", - "consensus/safety-rules", - "crates/aptos", - "crates/aptos-admin-service", - "crates/aptos-api-tester", - "crates/aptos-bcs-utils", - "crates/aptos-bitvec", - "crates/aptos-build-info", - "crates/aptos-collections", - "crates/aptos-compression", - "crates/aptos-crypto", - "crates/aptos-crypto-derive", - "crates/aptos-debugger", - "crates/aptos-dkg", - "crates/aptos-drop-helper", - "crates/aptos-enum-conversion-derive", - "crates/aptos-faucet/cli", - "crates/aptos-faucet/core", - "crates/aptos-faucet/metrics-server", - "crates/aptos-faucet/service", - "crates/aptos-genesis", - "crates/aptos-github-client", - "crates/aptos-id-generator", - "crates/aptos-infallible", - "crates/aptos-inspection-service", - "crates/aptos-jwk-consensus", - "crates/aptos-keygen", - "crates/aptos-ledger", - "crates/aptos-log-derive", - "crates/aptos-logger", - "crates/aptos-metrics-core", - "aptos-move/aptos-debugger", - "crates/aptos-network-checker", - "crates/aptos-node-identity", - "crates/aptos-openapi", - "crates/aptos-profiler", - "crates/aptos-proptest-helpers", - "crates/aptos-push-metrics", - "crates/aptos-rate-limiter", - "crates/aptos-rest-client", - "crates/aptos-retrier", - "crates/aptos-rosetta", - "crates/aptos-rosetta-cli", - "crates/aptos-runtimes", - "crates/aptos-speculative-state-helper", - "crates/aptos-system-utils", - "crates/aptos-telemetry", - "crates/aptos-telemetry-service", - "crates/aptos-temppath", - "crates/aptos-time-service", - "crates/aptos-warp-webserver", - "crates/bounded-executor", - "crates/channel", - "crates/crash-handler", - "crates/fallible", - "crates/indexer", - "crates/jwk-utils", - "crates/node-resource-metrics", - "crates/num-variants", - "crates/proxy", - "crates/reliable-broadcast", - "crates/short-hex-str", - "crates/transaction-emitter", - "crates/transaction-emitter-lib", - "crates/transaction-generator-lib", - "crates/validator-transaction-pool", - "devtools/aptos-cargo-cli", - "dkg", - "ecosystem/indexer-grpc/indexer-grpc-cache-worker", - "ecosystem/indexer-grpc/indexer-grpc-data-service", - "ecosystem/indexer-grpc/indexer-grpc-file-store", - "ecosystem/indexer-grpc/indexer-grpc-fullnode", - "ecosystem/indexer-grpc/indexer-grpc-in-memory-cache-benchmark", - "ecosystem/indexer-grpc/indexer-grpc-integration-tests", - "ecosystem/indexer-grpc/indexer-grpc-server-framework", - "ecosystem/indexer-grpc/indexer-grpc-table-info", - "ecosystem/indexer-grpc/indexer-grpc-utils", - "ecosystem/indexer-grpc/transaction-filter", - "ecosystem/node-checker", - "ecosystem/node-checker/fn-check-client", - "execution/block-partitioner", - "execution/executor", - "execution/executor-benchmark", - "execution/executor-service", - "execution/executor-test-helpers", - "execution/executor-types", - "experimental/execution/ptx-executor", - "experimental/runtimes", - "experimental/storage/layered-map", - "keyless/circuit", - "keyless/common", - "keyless/pepper/common", - "keyless/pepper/example-client-rust", - "keyless/pepper/service", - "mempool", - "network/benchmark", - "network/builder", - "network/discovery", - "network/framework", - "network/memsocket", - 
"network/netcore", - "peer-monitoring-service/client", - "peer-monitoring-service/server", - "peer-monitoring-service/types", - "protos/rust", - "sdk", - "secure/net", - "secure/storage", - "secure/storage/vault", - "state-sync/aptos-data-client", - "state-sync/data-streaming-service", - "state-sync/inter-component/consensus-notifications", - "state-sync/inter-component/event-notifications", - "state-sync/inter-component/mempool-notifications", - "state-sync/inter-component/storage-service-notifications", - "state-sync/state-sync-driver", - "state-sync/storage-service/client", - "state-sync/storage-service/server", - "state-sync/storage-service/types", - "storage/accumulator", - "storage/aptosdb", - "storage/backup/backup-cli", - "storage/backup/backup-service", - "storage/db-tool", - "storage/executable-store", - "storage/indexer", - "storage/indexer_schemas", - "storage/jellyfish-merkle", - "storage/rocksdb-options", - "storage/schemadb", - "storage/scratchpad", - "storage/storage-interface", - "testsuite/dos/http_test", - "testsuite/dos/listener", - "testsuite/dos/sender", - "testsuite/forge", - "testsuite/forge-cli", - "testsuite/fuzzer", - "testsuite/fuzzer/fuzz", - "testsuite/module-publish", - "testsuite/smoke-test", - "testsuite/testcases", - "third_party/move/evm/exec-utils", - "third_party/move/evm/extract-ethereum-abi", - # third_party/move - "third_party/move/extensions/async/move-async-vm", - "third_party/move/extensions/move-table-extension", - "third_party/move/move-binary-format", - "third_party/move/move-binary-format/serializer-tests", - "third_party/move/move-borrow-graph", - "third_party/move/move-bytecode-spec", - "third_party/move/move-bytecode-verifier", - "third_party/move/move-bytecode-verifier/bytecode-verifier-tests", - "third_party/move/move-bytecode-verifier/fuzz", - "third_party/move/move-bytecode-verifier/invalid-mutations", - "third_party/move/move-bytecode-verifier/transactional-tests", - "third_party/move/move-command-line-common", - "third_party/move/move-compiler", - "third_party/move/move-compiler-v2", - "third_party/move/move-compiler-v2/tools/testdiff", - "third_party/move/move-compiler-v2/transactional-tests", - "third_party/move/move-compiler/transactional-tests", - "third_party/move/move-core/types", - "third_party/move/move-examples", - "third_party/move/move-ir-compiler", - "third_party/move/move-ir-compiler/move-bytecode-source-map", - "third_party/move/move-ir-compiler/move-ir-to-bytecode", - "third_party/move/move-ir-compiler/move-ir-to-bytecode/syntax", - "third_party/move/move-ir-compiler/transactional-tests", - "third_party/move/move-ir/types", - "third_party/move/move-model", - "third_party/move/move-model/bytecode", - "third_party/move/move-model/bytecode-test-utils", - "third_party/move/move-prover", - "third_party/move/move-prover/boogie-backend", - "third_party/move/move-prover/bytecode-pipeline", - "third_party/move/move-prover/lab", - "third_party/move/move-prover/move-abigen", - "third_party/move/move-prover/move-docgen", - "third_party/move/move-prover/move-errmapgen", - "third_party/move/move-prover/test-utils", - "third_party/move/move-symbol-pool", - "third_party/move/move-vm/integration-tests", - "third_party/move/move-vm/paranoid-tests", - "third_party/move/move-vm/runtime", - "third_party/move/move-vm/test-utils", - "third_party/move/move-vm/transactional-tests", - "third_party/move/move-vm/types", - "third_party/move/testing-infra/module-generation", - "third_party/move/testing-infra/test-generation", - 
"third_party/move/testing-infra/transactional-test-runner", - "third_party/move/tools/move-bytecode-utils", - "third_party/move/tools/move-bytecode-viewer", - "third_party/move/tools/move-cli", - "third_party/move/tools/move-coverage", - "third_party/move/tools/move-disassembler", - "third_party/move/tools/move-explain", - "third_party/move/tools/move-package", - "third_party/move/tools/move-resource-viewer", - "third_party/move/tools/move-unit-test", - "tools/calc-dep-sizes", - "types", - "vm-validator", + "api", + "api/openapi-spec-generator", + "api/test-context", + "api/types", + "aptos-move/aptos-abstract-gas-usage", + "aptos-move/aptos-aggregator", + "aptos-move/aptos-gas-algebra", + "aptos-move/aptos-gas-meter", + "aptos-move/aptos-gas-profiling", + "aptos-move/aptos-gas-schedule", + "aptos-move/aptos-gas-schedule-updator", + "aptos-move/aptos-memory-usage-tracker", + "aptos-move/aptos-native-interface", + "aptos-move/aptos-release-builder", + "aptos-move/aptos-resource-viewer", + "aptos-move/aptos-sdk-builder", + "aptos-move/aptos-transaction-benchmarks", + "aptos-move/aptos-transactional-test-harness", + "aptos-move/aptos-validator-interface", + "aptos-move/aptos-vm", + "aptos-move/aptos-vm-logging", + "aptos-move/aptos-vm-profiling", + "aptos-move/aptos-vm-types", + "aptos-move/block-executor", + "aptos-move/e2e-benchmark", + "aptos-move/e2e-move-tests", + "aptos-move/e2e-tests", + "aptos-move/e2e-testsuite", + "aptos-move/framework", + "aptos-move/framework/cached-packages", + "aptos-move/framework/table-natives", + "aptos-move/move-examples", + "aptos-move/mvhashmap", + "aptos-move/package-builder", + "aptos-move/vm-genesis", + "aptos-move/writeset-transaction-generator", + "aptos-node", + "aptos-utils", + "config", + "config/global-constants", + "consensus", + "consensus/consensus-types", + "consensus/safety-rules", + "crates/aptos", + "crates/aptos-admin-service", + "crates/aptos-api-tester", + "crates/aptos-bcs-utils", + "crates/aptos-bitvec", + "crates/aptos-build-info", + "crates/aptos-collections", + "crates/aptos-compression", + "crates/aptos-crypto", + "crates/aptos-crypto-derive", + "crates/aptos-debugger", + "crates/aptos-dkg", + "crates/aptos-drop-helper", + "crates/aptos-enum-conversion-derive", + "crates/aptos-faucet/cli", + "crates/aptos-faucet/core", + "crates/aptos-faucet/metrics-server", + "crates/aptos-faucet/service", + "crates/aptos-genesis", + "crates/aptos-github-client", + "crates/aptos-id-generator", + "crates/aptos-infallible", + "crates/aptos-inspection-service", + "crates/aptos-jwk-consensus", + "crates/aptos-keygen", + "crates/aptos-ledger", + "crates/aptos-log-derive", + "crates/aptos-logger", + "crates/aptos-metrics-core", + "aptos-move/aptos-debugger", + "crates/aptos-network-checker", + "crates/aptos-node-identity", + "crates/aptos-openapi", + "crates/aptos-profiler", + "crates/aptos-proptest-helpers", + "crates/aptos-push-metrics", + "crates/aptos-rate-limiter", + "crates/aptos-rest-client", + "crates/aptos-retrier", + "crates/aptos-rosetta", + "crates/aptos-rosetta-cli", + "crates/aptos-runtimes", + "crates/aptos-speculative-state-helper", + "crates/aptos-system-utils", + "crates/aptos-telemetry", + "crates/aptos-telemetry-service", + "crates/aptos-temppath", + "crates/aptos-time-service", + "crates/aptos-warp-webserver", + "crates/bounded-executor", + "crates/channel", + "crates/crash-handler", + "crates/fallible", + "crates/indexer", + "crates/jwk-utils", + "crates/node-resource-metrics", + "crates/num-variants", + "crates/proxy", + 
"crates/reliable-broadcast", + "crates/short-hex-str", + "crates/transaction-emitter", + "crates/transaction-emitter-lib", + "crates/transaction-generator-lib", + "crates/validator-transaction-pool", + "devtools/aptos-cargo-cli", + "dkg", + "ecosystem/indexer-grpc/indexer-grpc-cache-worker", + "ecosystem/indexer-grpc/indexer-grpc-data-service", + "ecosystem/indexer-grpc/indexer-grpc-file-store", + "ecosystem/indexer-grpc/indexer-grpc-fullnode", + "ecosystem/indexer-grpc/indexer-grpc-in-memory-cache-benchmark", + "ecosystem/indexer-grpc/indexer-grpc-integration-tests", + "ecosystem/indexer-grpc/indexer-grpc-server-framework", + "ecosystem/indexer-grpc/indexer-grpc-table-info", + "ecosystem/indexer-grpc/indexer-grpc-utils", + "ecosystem/indexer-grpc/transaction-filter", + "ecosystem/node-checker", + "ecosystem/node-checker/fn-check-client", + "execution/block-partitioner", + "execution/executor", + "execution/executor-benchmark", + "execution/executor-service", + "execution/executor-test-helpers", + "execution/executor-types", + "experimental/execution/ptx-executor", + "experimental/runtimes", + "experimental/storage/layered-map", + "keyless/circuit", + "keyless/common", + "keyless/pepper/common", + "keyless/pepper/example-client-rust", + "keyless/pepper/service", + "mempool", + "network/benchmark", + "network/builder", + "network/discovery", + "network/framework", + "network/memsocket", + "network/netcore", + "peer-monitoring-service/client", + "peer-monitoring-service/server", + "peer-monitoring-service/types", + "protos/rust", + "sdk", + "secure/net", + "secure/storage", + "secure/storage/vault", + "state-sync/aptos-data-client", + "state-sync/data-streaming-service", + "state-sync/inter-component/consensus-notifications", + "state-sync/inter-component/event-notifications", + "state-sync/inter-component/mempool-notifications", + "state-sync/inter-component/storage-service-notifications", + "state-sync/state-sync-driver", + "state-sync/storage-service/client", + "state-sync/storage-service/server", + "state-sync/storage-service/types", + "storage/accumulator", + "storage/aptosdb", + "storage/backup/backup-cli", + "storage/backup/backup-service", + "storage/db-tool", + "storage/executable-store", + "storage/indexer", + "storage/indexer_schemas", + "storage/jellyfish-merkle", + "storage/rocksdb-options", + "storage/schemadb", + "storage/scratchpad", + "storage/storage-interface", + "testsuite/dos/http_test", + "testsuite/dos/listener", + "testsuite/dos/sender", + "testsuite/forge", + "testsuite/forge-cli", + "testsuite/fuzzer", + "testsuite/fuzzer/fuzz", + "testsuite/module-publish", + "testsuite/smoke-test", + "testsuite/testcases", + "third_party/move/evm/exec-utils", + "third_party/move/evm/extract-ethereum-abi", + # third_party/move + "third_party/move/extensions/async/move-async-vm", + "third_party/move/extensions/move-table-extension", + "third_party/move/move-binary-format", + "third_party/move/move-binary-format/serializer-tests", + "third_party/move/move-borrow-graph", + "third_party/move/move-bytecode-spec", + "third_party/move/move-bytecode-verifier", + "third_party/move/move-bytecode-verifier/bytecode-verifier-tests", + "third_party/move/move-bytecode-verifier/fuzz", + "third_party/move/move-bytecode-verifier/invalid-mutations", + "third_party/move/move-bytecode-verifier/transactional-tests", + "third_party/move/move-command-line-common", + "third_party/move/move-compiler", + "third_party/move/move-compiler-v2", + "third_party/move/move-compiler-v2/legacy-move-compiler", + 
"third_party/move/move-compiler-v2/tools/testdiff", + "third_party/move/move-compiler-v2/transactional-tests", + "third_party/move/move-compiler/transactional-tests", + "third_party/move/move-core/types", + "third_party/move/move-examples", + "third_party/move/move-ir-compiler", + "third_party/move/move-ir-compiler/move-bytecode-source-map", + "third_party/move/move-ir-compiler/move-ir-to-bytecode", + "third_party/move/move-ir-compiler/move-ir-to-bytecode/syntax", + "third_party/move/move-ir-compiler/transactional-tests", + "third_party/move/move-ir/types", + "third_party/move/move-model", + "third_party/move/move-model/bytecode", + "third_party/move/move-model/bytecode-test-utils", + "third_party/move/move-prover", + "third_party/move/move-prover/boogie-backend", + "third_party/move/move-prover/bytecode-pipeline", + "third_party/move/move-prover/lab", + "third_party/move/move-prover/move-abigen", + "third_party/move/move-prover/move-docgen", + "third_party/move/move-prover/move-errmapgen", + "third_party/move/move-prover/test-utils", + "third_party/move/move-symbol-pool", + "third_party/move/move-vm/integration-tests", + "third_party/move/move-vm/paranoid-tests", + "third_party/move/move-vm/runtime", + "third_party/move/move-vm/test-utils", + "third_party/move/move-vm/transactional-tests", + "third_party/move/move-vm/types", + "third_party/move/testing-infra/module-generation", + "third_party/move/testing-infra/test-generation", + "third_party/move/testing-infra/transactional-test-runner", + "third_party/move/tools/move-bytecode-utils", + "third_party/move/tools/move-bytecode-viewer", + "third_party/move/tools/move-cli", + "third_party/move/tools/move-coverage", + "third_party/move/tools/move-disassembler", + "third_party/move/tools/move-explain", + "third_party/move/tools/move-package", + "third_party/move/tools/move-resource-viewer", + "third_party/move/tools/move-unit-test", + "tools/calc-dep-sizes", + "types", + "vm-validator", ] # NOTE: default-members is the complete list of binaries that form the "production Aptos codebase". These members should @@ -254,14 +255,14 @@ members = [ # # For more, see the "Conditional compilation for tests" section in documentation/coding_guidelines.md. 
default-members = [ - "consensus/safety-rules", - "crates/aptos-faucet/service", - "crates/aptos-keygen", - "crates/aptos-rate-limiter", - "crates/transaction-emitter", - "aptos-move/framework", - "storage/backup/backup-cli", - "ecosystem/node-checker", + "consensus/safety-rules", + "crates/aptos-faucet/service", + "crates/aptos-keygen", + "crates/aptos-rate-limiter", + "crates/transaction-emitter", + "aptos-move/framework", + "storage/backup/backup-cli", + "ecosystem/node-checker", ] # All workspace members should inherit these keys @@ -427,7 +428,7 @@ aptos-telemetry-service = { path = "crates/aptos-telemetry-service" } aptos-temppath = { path = "crates/aptos-temppath" } aptos-testcases = { path = "testsuite/testcases" } aptos-time-service = { path = "crates/aptos-time-service", features = [ - "async", + "async", ] } aptos-transaction-emitter-lib = { path = "crates/transaction-emitter-lib" } aptos-transaction-generator-lib = { path = "crates/transaction-generator-lib" } @@ -534,10 +535,10 @@ determinator = "0.12.0" derive_more = "0.99.11" diesel = "2.2.3" diesel-async = { version = "0.5", features = [ - "async-connection-wrapper", - "postgres", - "bb8", - "tokio", + "async-connection-wrapper", + "postgres", + "bb8", + "tokio", ] } diesel_migrations = { version = "2.2", features = ["postgres"] } difference = "2.0.0" @@ -600,8 +601,8 @@ internment = { version = "0.5.0", features = ["arc"] } ipnet = "2.5.0" itertools = "0.12" jemallocator = { version = "0.5.0", features = [ - "profiling", - "unprefixed_malloc_on_supported_platforms", + "profiling", + "unprefixed_malloc_on_supported_platforms", ] } jemalloc-sys = "0.5.4" json-patch = "0.2.6" @@ -679,18 +680,18 @@ rand_core = "0.5.1" random_word = "0.3.0" rayon = "1.5.2" redis = { version = "0.22.3", features = [ - "tokio-comp", - "script", - "connection-manager", + "tokio-comp", + "script", + "connection-manager", ] } redis-test = { version = "0.1.1", features = ["aio"] } ref-cast = "1.0.6" regex = "1.9.3" reqwest = { version = "0.11.11", features = [ - "blocking", - "cookies", - "json", - "stream", + "blocking", + "cookies", + "json", + "stream", ] } reqwest-middleware = "0.2.0" reqwest-retry = "0.2.1" @@ -714,8 +715,8 @@ serde = { version = "1.0.193", features = ["derive", "rc"] } serde-big-array = "0.5.1" serde_bytes = "0.11.6" serde_json = { version = "1.0.81", features = [ - "preserve_order", - "arbitrary_precision", + "preserve_order", + "arbitrary_precision", ] } # Note: arbitrary_precision is required to parse u256 in JSON serde_repr = "0.1" serde_merge = "0.1.3" @@ -761,12 +762,12 @@ tokio-test = "0.4.1" tokio-util = { version = "0.7.2", features = ["compat", "codec"] } toml = "0.8" tonic = { version = "0.11.0", features = [ - "tls-roots", - "transport", - "prost", - "gzip", - "codegen", - "zstd", + "tls-roots", + "transport", + "prost", + "gzip", + "codegen", + "zstd", ] } tonic-reflection = "0.11.0" triomphe = "0.1.9" @@ -775,8 +776,8 @@ typed-arena = "2.0.2" typenum = "1.17.0" uint = "0.9.4" ureq = { version = "1.5.4", features = [ - "json", - "native-tls", + "json", + "native-tls", ], default_features = false } url = { version = "2.4.0", features = ["serde"] } uuid = { version = "1.0.0", features = ["v4", "serde"] } @@ -799,6 +800,7 @@ move-bytecode-utils = { path = "third_party/move/tools/move-bytecode-utils" } move-cli = { path = "third_party/move/tools/move-cli" } move-command-line-common = { path = "third_party/move/move-command-line-common" } move-coverage = { path = "third_party/move/tools/move-coverage" } 
+legacy-move-compiler = { path = "third_party/move/move-compiler-v2/legacy-move-compiler" } move-compiler = { path = "third_party/move/move-compiler" } move-compiler-v2 = { path = "third_party/move/move-compiler-v2" } move-core-types = { path = "third_party/move/move-core/types" } @@ -817,16 +819,17 @@ move-stackless-bytecode-test-utils = { path = "third_party/move/move-model/bytec aptos-move-stdlib = { path = "aptos-move/framework/move-stdlib" } aptos-table-natives = { path = "aptos-move/framework/table-natives" } move-prover-test-utils = { path = "third_party/move/move-prover/test-utils" } +move-prover-lab = { path = "third_party/move/move-prover/lab" } move-resource-viewer = { path = "third_party/move/tools/move-resource-viewer" } move-symbol-pool = { path = "third_party/move/move-symbol-pool" } move-table-extension = { path = "third_party/move/extensions/move-table-extension" } move-transactional-test-runner = { path = "third_party/move/testing-infra/transactional-test-runner" } move-unit-test = { path = "third_party/move/tools/move-unit-test", features = [ - "table-extension", + "table-extension", ] } move-vm-runtime = { path = "third_party/move/move-vm/runtime" } move-vm-test-utils = { path = "third_party/move/move-vm/test-utils", features = [ - "table-extension", + "table-extension", ] } move-vm-types = { path = "third_party/move/move-vm/types" } diff --git a/aptos-move/framework/Cargo.toml b/aptos-move/framework/Cargo.toml index 3d5bfd0f98501..f8088c64eb201 100644 --- a/aptos-move/framework/Cargo.toml +++ b/aptos-move/framework/Cargo.toml @@ -44,14 +44,14 @@ either = { workspace = true } flate2 = { workspace = true } hex = { workspace = true } itertools = { workspace = true } +legacy-move-compiler = { workspace = true } libsecp256k1 = { workspace = true } log = { workspace = true } -lru = { workspace = true } merlin = { workspace = true } move-binary-format = { workspace = true } move-cli = { workspace = true } move-command-line-common = { workspace = true } -move-compiler = { workspace = true } +move-compiler-v2 = { workspace = true } move-core-types = { workspace = true } move-docgen = { workspace = true } move-model = { workspace = true } @@ -59,6 +59,7 @@ move-package = { workspace = true } move-prover = { workspace = true } move-prover-boogie-backend = { workspace = true } move-prover-bytecode-pipeline = { workspace = true } +move-prover-lab = { workspace = true } move-stackless-bytecode = { workspace = true } move-vm-runtime = { workspace = true } move-vm-types = { workspace = true } @@ -75,8 +76,8 @@ sha3 = { workspace = true } siphasher = { workspace = true } smallvec = { workspace = true } tempfile = { workspace = true } -thiserror = { workspace = true } tiny-keccak = { workspace = true } +toml = { workspace = true } [dev-dependencies] aptos-aggregator = { workspace = true, features = ["testing"] } diff --git a/aptos-move/framework/README-MIRROR-REPO.md b/aptos-move/framework/README-MIRROR-REPO.md new file mode 100644 index 0000000000000..3cdda618087db --- /dev/null +++ b/aptos-move/framework/README-MIRROR-REPO.md @@ -0,0 +1,24 @@ +# The Aptos Framework Repo + +This repository serves as a mirror for the Aptos Framework packages, including the Move standard library. The contents are synced from [aptos-core](https://github.com/aptos-labs/aptos-core) on an hourly basis. + +By pulling dependencies from this mirror repository, developers can avoid downloading unnecessary data, reducing build time significantly. 
+
+## Usage
+To use the packages in this repository as dependencies in your Move project, you can include them in your `Move.toml` file by adding them as Git dependencies.
+
+To add a dependency from this repository, include the following in your `Move.toml` file:
+```
+[dependencies]
+<package-name> = { git = "https://github.com/aptos-labs/aptos-framework.git", subdir = "<path-to-package>", rev = "<branch-or-commit>" }
+```
+For example, to add `AptosFramework` from the `mainnet` branch, you would use:
+```
+AptosFramework = { git = "https://github.com/aptos-labs/aptos-framework.git", subdir = "aptos-framework", rev = "mainnet" }
+```
+Make sure to replace `subdir` with the appropriate path if you are referencing a different package within the framework.
+
+## Contributing
+If you want to contribute to the development of the framework, please submit issues and pull requests to the [aptos-core](https://github.com/aptos-labs/aptos-core) repository, where active development happens.
+
+Bugs, feature requests, or discussions of enhancements will be tracked in the issue section there as well. This repository is a mirror, and issues will not be tracked here.
diff --git a/aptos-move/framework/README.md b/aptos-move/framework/README.md
index d308719bb50e0..d7c27d0518b52 100644
--- a/aptos-move/framework/README.md
+++ b/aptos-move/framework/README.md
@@ -50,7 +50,7 @@ To skip the Move prover tests, run:
cargo test -- --skip prover
```
-To filter and run only the tests in specific packages (e.g., `aptos_stdlib`), run:
+To filter and run **all** the tests in specific packages (e.g., `aptos_stdlib`), run:
```
cargo test -- aptos_stdlib --skip prover
@@ -58,6 +58,23 @@ cargo test -- aptos_stdlib --skip prover
(See tests in `tests/move_unit_test.rs` to determine which filter to use; e.g., to run the tests in `aptos_framework` you must filter by `move_framework`.)
+To **filter by test name or module name** in a specific package (e.g., run the `test_empty_range_proof` test in `aptos_stdlib::ristretto255_bulletproofs`), run:
+
+```
+TEST_FILTER="test_empty_range_proof" cargo test -- aptos_stdlib --skip prover
+```
+
+Or, e.g., run all the Bulletproof tests:
+```
+TEST_FILTER="bulletproofs" cargo test -- aptos_stdlib --skip prover
+```
+
+To show the amount of time and gas used in every test, set the env var `REPORT_STATS=1`.
+E.g.,
+```
+REPORT_STATS=1 TEST_FILTER="bulletproofs" cargo test -- aptos_stdlib --skip prover
+```
+
Sometimes, Rust runs out of stack memory in dev build mode. You can address this by either:
1. Adjusting the stack size
@@ -79,7 +96,7 @@ The overall structure of the Aptos Framework is as follows:
├── aptos-token # Sources, testing and generated documentation for Aptos token component
├── aptos-stdlib # Sources, testing and generated documentation for Aptos stdlib component
├── move-stdlib # Sources, testing and generated documentation for Move stdlib component
-├── cached-packages # Tooling to generate SDK from mvoe sources.
+├── cached-packages # Tooling to generate SDK from move sources.
├── src # Compilation and generation of information from Move source files in the Aptos Framework.
Not designed to be used as a Rust library ├── releases # Move release bundles └── tests diff --git a/aptos-move/framework/aptos-experimental/Move.toml b/aptos-move/framework/aptos-experimental/Move.toml new file mode 100644 index 0000000000000..1d5a6845b0d29 --- /dev/null +++ b/aptos-move/framework/aptos-experimental/Move.toml @@ -0,0 +1,9 @@ +[package] +name = "AptosExperimental" +version = "1.0.0" + +[addresses] +aptos_experimental = "0x7" + +[dependencies] +AptosFramework = { local = "../aptos-framework" } diff --git a/aptos-move/framework/aptos-experimental/doc/active_order_book.md b/aptos-move/framework/aptos-experimental/doc/active_order_book.md new file mode 100644 index 0000000000000..aaafb50fa6f7b --- /dev/null +++ b/aptos-move/framework/aptos-experimental/doc/active_order_book.md @@ -0,0 +1,857 @@ + + + +# Module `0x7::active_order_book` + +(work in progress) + + +- [Struct `ActiveBidKey`](#0x7_active_order_book_ActiveBidKey) +- [Struct `ActiveBidData`](#0x7_active_order_book_ActiveBidData) +- [Enum `ActiveOrderBook`](#0x7_active_order_book_ActiveOrderBook) +- [Constants](#@Constants_0) +- [Function `new_active_order_book`](#0x7_active_order_book_new_active_order_book) +- [Function `best_bid_price`](#0x7_active_order_book_best_bid_price) +- [Function `best_ask_price`](#0x7_active_order_book_best_ask_price) +- [Function `get_mid_price`](#0x7_active_order_book_get_mid_price) +- [Function `get_slippage_price`](#0x7_active_order_book_get_slippage_price) +- [Function `get_impact_bid_price`](#0x7_active_order_book_get_impact_bid_price) +- [Function `get_impact_ask_price`](#0x7_active_order_book_get_impact_ask_price) +- [Function `get_tie_breaker`](#0x7_active_order_book_get_tie_breaker) +- [Function `cancel_active_order`](#0x7_active_order_book_cancel_active_order) +- [Function `is_active_order`](#0x7_active_order_book_is_active_order) +- [Function `is_taker_order`](#0x7_active_order_book_is_taker_order) +- [Function `single_match_with_current_active_order`](#0x7_active_order_book_single_match_with_current_active_order) +- [Function `get_single_match_for_buy_order`](#0x7_active_order_book_get_single_match_for_buy_order) +- [Function `get_single_match_for_sell_order`](#0x7_active_order_book_get_single_match_for_sell_order) +- [Function `get_single_match_result`](#0x7_active_order_book_get_single_match_result) +- [Function `increase_order_size`](#0x7_active_order_book_increase_order_size) +- [Function `decrease_order_size`](#0x7_active_order_book_decrease_order_size) +- [Function `place_maker_order`](#0x7_active_order_book_place_maker_order) + + +
use 0x1::big_ordered_map;
+use 0x1::error;
+use 0x1::option;
+use 0x7::order_book_types;
+
+ + + + + +## Struct `ActiveBidKey` + + + +
struct ActiveBidKey has copy, drop, store
+
+ + + +
+Fields + + +
+
+price: u64 +
+
+ +
+
+tie_breaker: order_book_types::UniqueIdxType +
+
+ +
+
+ + +
+ + + +## Struct `ActiveBidData` + + + +
struct ActiveBidData has copy, drop, store
+
+ + + +
+Fields + + +
+
+order_id: order_book_types::OrderIdType +
+
+ +
+
+size: u64 +
+
+ +
+
+ + +
+
+
+## Enum `ActiveOrderBook`
+
+OrderBook tracking active (i.e. unconditional, immediately executable) limit orders.
+
+- invariant: all buys are smaller than all sells, at all times.
+- the tie_breaker in sells is U256_MAX - value, to make sure that the largest value in the book,
+which is taken first, is the one inserted first among those with the same bid price.
+
+(An illustrative sketch of this key layout follows the variant listing below.)
+
+
enum ActiveOrderBook has store
+
+ + + +
+Variants + + +
+V1 + + +
+Fields + + +
+
+buys: big_ordered_map::BigOrderedMap<active_order_book::ActiveBidKey, active_order_book::ActiveBidData> +
+
+ +
+
+sells: big_ordered_map::BigOrderedMap<active_order_book::ActiveBidKey, active_order_book::ActiveBidData> +
+
+ +
+
+ + +
+ +
+ +
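A minimal Rust sketch — illustrative only, not part of this module — of the key layout the enum relies on. A `BTreeMap` keyed by `(price, tie_breaker)` stands in for `BigOrderedMap<ActiveBidKey, ActiveBidData>`, and a plain `u128` stands in for `UniqueIdxType`; price is the primary sort key, the tie-breaker only separates same-price entries, and matching reads buys from the largest key and sells from the smallest (mirroring the `borrow_back`/`borrow_front` calls in the functions below).

```rust
use std::collections::BTreeMap;

/// Keys sort by price first, then by tie_breaker, like ActiveBidKey.
type Key = (u64 /* price */, u128 /* tie_breaker */);

fn main() {
    let mut buys: BTreeMap<Key, u64 /* size */> = BTreeMap::new();
    let mut sells: BTreeMap<Key, u64> = BTreeMap::new();

    // Two buys at the same price differ only in the tie-breaker.
    buys.insert((100, 7), 10);
    buys.insert((100, 9), 5);
    buys.insert((99, 1), 20);
    sells.insert((101, 3), 8);
    sells.insert((102, 4), 12);

    // Best bid = largest buy key; best ask = smallest sell key
    // (mirrors borrow_back in best_bid_price / borrow_front in best_ask_price).
    let (&best_bid, _) = buys.iter().next_back().unwrap();
    let (&best_ask, _) = sells.iter().next().unwrap();
    assert_eq!(best_bid.0, 100);
    assert_eq!(best_ask.0, 101);

    // Module invariant: every buy price stays strictly below every sell price.
    assert!(best_bid.0 < best_ask.0);
    println!("best bid {:?}, best ask {:?}", best_bid, best_ask);
}
```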
+ + + +## Constants + + + + +There is a code bug that breaks internal invariant + + +
const EINTERNAL_INVARIANT_BROKEN: u64 = 2;
+
+ + + + + + + +
const U256_MAX: u256 = 115792089237316195423570985008687907853269984665640564039457584007913129639935;
+
+ + + + + + + +
const EINVALID_MAKER_ORDER: u64 = 1;
+
+ + + + + +========= Active OrderBook =========== + + +
const U64_MAX: u64 = 18446744073709551615;
+
+ + + + + +## Function `new_active_order_book` + + + +
public fun new_active_order_book(): active_order_book::ActiveOrderBook
+
+ + + +
+Implementation + + +
public fun new_active_order_book(): ActiveOrderBook {
+    // potentially add max value to both sides (that will be skipped),
+    // so that max_key never changes, and doesn't create conflict.
+    ActiveOrderBook::V1 {
+        buys: new_default_big_ordered_map(),
+        sells: new_default_big_ordered_map()
+    }
+}
+
+ + + +
+
+
+## Function `best_bid_price`
+
+Picks the best (i.e. highest) bid (i.e. buy) price from the active order book.
+Returns none if there are no buys.
+
+
public fun best_bid_price(self: &active_order_book::ActiveOrderBook): option::Option<u64>
+
+ + + +
+Implementation + + +
public fun best_bid_price(self: &ActiveOrderBook): Option<u64> {
+    if (self.buys.is_empty()) {
+        option::none()
+    } else {
+        let (back_key, _back_value) = self.buys.borrow_back();
+        option::some(back_key.price)
+    }
+}
+
+ + + +
+
+
+## Function `best_ask_price`
+
+Picks the best (i.e. lowest) ask (i.e. sell) price from the active order book.
+Returns none if there are no sells.
+
+
public fun best_ask_price(self: &active_order_book::ActiveOrderBook): option::Option<u64>
+
+ + + +
+Implementation + + +
public fun best_ask_price(self: &ActiveOrderBook): Option<u64> {
+    if (self.sells.is_empty()) {
+        option::none()
+    } else {
+        let (front_key, _front_value) = self.sells.borrow_front();
+        option::some(front_key.price)
+    }
+}
+
+ + + +
+ + + +## Function `get_mid_price` + + + +
public fun get_mid_price(self: &active_order_book::ActiveOrderBook): option::Option<u64>
+
+ + + +
+Implementation + + +
public fun get_mid_price(self: &ActiveOrderBook): Option<u64> {
+    let best_bid = self.best_bid_price();
+    let best_ask = self.best_ask_price();
+    if (best_bid.is_none() || best_ask.is_none()) {
+        option::none()
+    } else {
+        option::some(
+            (best_bid.destroy_some() + best_ask.destroy_some()) / 2
+        )
+    }
+}
+
+ + + +
+ + + +## Function `get_slippage_price` + + + +
public fun get_slippage_price(self: &active_order_book::ActiveOrderBook, is_buy: bool, slippage_pct: u64): option::Option<u64>
+
+ + + +
+Implementation + + +
public fun get_slippage_price(
+    self: &ActiveOrderBook, is_buy: bool, slippage_pct: u64
+): Option<u64> {
+    let mid_price = self.get_mid_price();
+    if (mid_price.is_none()) {
+        return option::none();
+    };
+    let mid_price = mid_price.destroy_some();
+    let slippage = mul_div(
+        mid_price, slippage_pct, get_slippage_pct_precision() * 100
+    );
+    if (is_buy) {
+        option::some(mid_price + slippage)
+    } else {
+        option::some(mid_price - slippage)
+    }
+}
+
+ + + +
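For intuition, a small self-contained Rust sketch of the mid-price and slippage arithmetic in `get_mid_price` and `get_slippage_price` above. `SLIPPAGE_PCT_PRECISION` is a hypothetical stand-in for whatever `get_slippage_pct_precision()` returns (assumed to be 100 here, i.e. `slippage_pct` expressed in hundredths of a percent); the real value lives in `order_book_types`.

```rust
/// Illustrative sketch only: mirrors the mid-price / slippage math above.
/// SLIPPAGE_PCT_PRECISION is an assumed stand-in for get_slippage_pct_precision().
const SLIPPAGE_PCT_PRECISION: u64 = 100;

fn mid_price(best_bid: u64, best_ask: u64) -> u64 {
    (best_bid + best_ask) / 2
}

fn slippage_price(best_bid: u64, best_ask: u64, is_buy: bool, slippage_pct: u64) -> u64 {
    let mid = mid_price(best_bid, best_ask);
    // mul_div(mid, slippage_pct, precision * 100), done in u128 to avoid overflow.
    let slippage = (mid as u128 * slippage_pct as u128
        / (SLIPPAGE_PCT_PRECISION as u128 * 100)) as u64;
    if is_buy { mid + slippage } else { mid - slippage }
}

fn main() {
    // With precision = 100, slippage_pct = 50 means 0.5%.
    // mid = (999 + 1001) / 2 = 1000; 0.5% of 1000 = 5.
    assert_eq!(slippage_price(999, 1001, true, 50), 1005);
    assert_eq!(slippage_price(999, 1001, false, 50), 995);
    println!("ok");
}
```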
+ + + +## Function `get_impact_bid_price` + + + +
fun get_impact_bid_price(self: &active_order_book::ActiveOrderBook, impact_size: u64): option::Option<u64>
+
+ + + +
+Implementation + + +
fun get_impact_bid_price(self: &ActiveOrderBook, impact_size: u64): Option<u64> {
+    let total_value = (0 as u128);
+    let total_size = 0;
+    let orders = &self.buys;
+    if (orders.is_empty()) {
+        return option::none();
+    };
+    let (front_key, front_value) = orders.borrow_back();
+    while (total_size < impact_size) {
+        let matched_size =
+            if (total_size + front_value.size > impact_size) {
+                impact_size - total_size
+            } else {
+                front_value.size
+            };
+        total_value = total_value
+            + (matched_size as u128) * (front_key.price as u128);
+        total_size = total_size + matched_size;
+        let next_key = orders.prev_key(&front_key);
+        if (next_key.is_none()) {
+            // TODO maybe we should return none if there is not enough depth?
+            break;
+        };
+        front_key = next_key.destroy_some();
+        front_value = orders.borrow(&front_key);
+    };
+    option::some((total_value / (total_size as u128)) as u64)
+}
+
+ + + +
+ + + +## Function `get_impact_ask_price` + + + +
fun get_impact_ask_price(self: &active_order_book::ActiveOrderBook, impact_size: u64): option::Option<u64>
+
+ + + +
+Implementation + + +
fun get_impact_ask_price(self: &ActiveOrderBook, impact_size: u64): Option<u64> {
+    let total_value = 0 as u128;
+    let total_size = 0;
+    let orders = &self.sells;
+    if (orders.is_empty()) {
+        return option::none();
+    };
+    let (front_key, front_value) = orders.borrow_front();
+    while (total_size < impact_size) {
+        let matched_size =
+            if (total_size + front_value.size > impact_size) {
+                impact_size - total_size
+            } else {
+                front_value.size
+            };
+        total_value = total_value
+            + (matched_size as u128) * (front_key.price as u128);
+        total_size = total_size + matched_size;
+        let next_key = orders.next_key(&front_key);
+        if (next_key.is_none()) {
+            break;
+        };
+        front_key = next_key.destroy_some();
+        front_value = orders.borrow(&front_key);
+    };
+    option::some((total_value / (total_size as u128)) as u64)
+}
+
+ + + +
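A standalone Rust sketch of the impact-price computation above: a size-weighted average price over the best levels until `impact_size` is covered, averaging over whatever depth exists when the book is shallower than `impact_size` (the same behavior as the Move code). It operates on plain `(price, size)` levels rather than a `BigOrderedMap`.

```rust
/// Illustrative sketch: size-weighted average execution price over the best
/// price levels, mirroring get_impact_bid_price / get_impact_ask_price.
/// `levels` must already be sorted best-first (descending prices for bids,
/// ascending prices for asks).
fn impact_price(levels: &[(u64, u64)], impact_size: u64) -> Option<u64> {
    if levels.is_empty() {
        return None;
    }
    let mut total_value: u128 = 0;
    let mut total_size: u64 = 0;
    for &(price, size) in levels {
        if total_size >= impact_size {
            break;
        }
        let matched = size.min(impact_size - total_size);
        total_value += matched as u128 * price as u128;
        total_size += matched;
        // Like the Move code, if we run out of depth we average over what we saw.
    }
    if total_size == 0 {
        return None; // guard against impact_size == 0
    }
    Some((total_value / total_size as u128) as u64)
}

fn main() {
    // Bids sorted best-first: 100 @ size 3, 99 @ size 5.
    let bids = [(100, 3), (99, 5)];
    // Impact size 5: (3 * 100 + 2 * 99) / 5 = 498 / 5 = 99 (integer division).
    assert_eq!(impact_price(&bids, 5), Some(99));
    // Impact size larger than available depth: averages over the whole book.
    assert_eq!(impact_price(&bids, 100), Some(99)); // (300 + 495) / 8 = 99
    println!("ok");
}
```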
+ + + +## Function `get_tie_breaker` + + + +
fun get_tie_breaker(unique_priority_idx: order_book_types::UniqueIdxType, is_buy: bool): order_book_types::UniqueIdxType
+
+ + + +
+Implementation + + +
inline fun get_tie_breaker(
+    unique_priority_idx: UniqueIdxType, is_buy: bool
+): UniqueIdxType {
+    if (is_buy) {
+        unique_priority_idx
+    } else {
+        unique_priority_idx.descending_idx()
+    }
+}
+
+ + + +
+ + + +## Function `cancel_active_order` + + + +
public fun cancel_active_order(self: &mut active_order_book::ActiveOrderBook, price: u64, unique_priority_idx: order_book_types::UniqueIdxType, is_buy: bool): u64
+
+ + + +
+Implementation + + +
public fun cancel_active_order(
+    self: &mut ActiveOrderBook,
+    price: u64,
+    unique_priority_idx: UniqueIdxType,
+    is_buy: bool
+): u64 {
+    let tie_breaker = get_tie_breaker(unique_priority_idx, is_buy);
+    let key = ActiveBidKey { price: price, tie_breaker };
+    let value =
+        if (is_buy) {
+            self.buys.remove(&key)
+        } else {
+            self.sells.remove(&key)
+        };
+    value.size
+}
+
+ + + +
+ + + +## Function `is_active_order` + + + +
public fun is_active_order(self: &active_order_book::ActiveOrderBook, price: u64, unique_priority_idx: order_book_types::UniqueIdxType, is_buy: bool): bool
+
+ + + +
+Implementation + + +
public fun is_active_order(
+    self: &ActiveOrderBook,
+    price: u64,
+    unique_priority_idx: UniqueIdxType,
+    is_buy: bool
+): bool {
+    let tie_breaker = get_tie_breaker(unique_priority_idx, is_buy);
+    let key = ActiveBidKey { price: price, tie_breaker };
+    if (is_buy) {
+        self.buys.contains(&key)
+    } else {
+        self.sells.contains(&key)
+    }
+}
+
+ + + +
+ + + +## Function `is_taker_order` + +Check if the order is a taker order - i.e. if it can be immediately matched with the order book fully or partially. + + +
public fun is_taker_order(self: &active_order_book::ActiveOrderBook, price: u64, is_buy: bool): bool
+
+ + + +
+Implementation + + +
public fun is_taker_order(
+    self: &ActiveOrderBook, price: u64, is_buy: bool
+): bool {
+    if (is_buy) {
+        let best_ask_price = self.best_ask_price();
+        best_ask_price.is_some() && price >= best_ask_price.destroy_some()
+    } else {
+        let best_bid_price = self.best_bid_price();
+        best_bid_price.is_some() && price <= best_bid_price.destroy_some()
+    }
+}
+
+ + + +
+ + + +## Function `single_match_with_current_active_order` + + + +
fun single_match_with_current_active_order(remaining_size: u64, cur_key: active_order_book::ActiveBidKey, cur_value: active_order_book::ActiveBidData, orders: &mut big_ordered_map::BigOrderedMap<active_order_book::ActiveBidKey, active_order_book::ActiveBidData>): order_book_types::ActiveMatchedOrder
+
+ + + +
+Implementation + + +
fun single_match_with_current_active_order(
+    remaining_size: u64,
+    cur_key: ActiveBidKey,
+    cur_value: ActiveBidData,
+    orders: &mut BigOrderedMap<ActiveBidKey, ActiveBidData>
+): ActiveMatchedOrder {
+    let is_cur_match_fully_consumed = cur_value.size <= remaining_size;
+
+    let matched_size_for_this_order =
+        if (is_cur_match_fully_consumed) {
+            cur_value.size
+        } else {
+            remaining_size
+        };
+
+    let result =
+        new_active_matched_order(
+            cur_value.order_id,
+            matched_size_for_this_order, // Matched size on the maker order
+            cur_value.size - matched_size_for_this_order // Remaining size on the maker order
+        );
+
+    if (is_cur_match_fully_consumed) {
+        orders.remove(&cur_key);
+    } else {
+        orders.borrow_mut(&cur_key).size -= matched_size_for_this_order;
+    };
+    result
+}
+
+ + + +
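A small standalone Rust sketch of one matching step, combining the crossing check from `is_taker_order` with the fill accounting of `single_match_with_current_active_order` above; the resting maker order is represented by plain `(maker_price, maker_size)` values rather than a map entry.

```rust
/// Illustrative sketch of one matching step against the best resting order.
#[derive(Debug, PartialEq)]
struct Fill {
    matched_size: u64,         // size matched on the maker order
    maker_remaining_size: u64, // what is left of the maker order afterwards
}

/// `is_buy` refers to the incoming (taker) order; `maker_price` is the best
/// opposite-side price (best ask for a buy, best bid for a sell).
fn single_match(
    is_buy: bool,
    taker_price: u64,
    taker_remaining: u64,
    maker_price: u64,
    maker_size: u64,
) -> Option<Fill> {
    // Crossing check, as in is_taker_order.
    let crosses = if is_buy { taker_price >= maker_price } else { taker_price <= maker_price };
    if !crosses {
        return None; // the order would rest in the book as a maker order
    }
    let matched_size = maker_size.min(taker_remaining);
    Some(Fill {
        matched_size,
        maker_remaining_size: maker_size - matched_size,
    })
}

fn main() {
    // Buy 7 @ 101 against a resting ask of 5 @ 100: maker fully consumed.
    assert_eq!(
        single_match(true, 101, 7, 100, 5),
        Some(Fill { matched_size: 5, maker_remaining_size: 0 })
    );
    // Sell 3 @ 100 against a resting bid of 10 @ 100: partial fill of the maker.
    assert_eq!(
        single_match(false, 100, 3, 100, 10),
        Some(Fill { matched_size: 3, maker_remaining_size: 7 })
    );
    // Buy 5 @ 99 against best ask 100: does not cross.
    assert_eq!(single_match(true, 99, 5, 100, 10), None);
    println!("ok");
}
```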
+ + + +## Function `get_single_match_for_buy_order` + + + +
fun get_single_match_for_buy_order(self: &mut active_order_book::ActiveOrderBook, price: u64, size: u64): order_book_types::ActiveMatchedOrder
+
+ + + +
+Implementation + + +
fun get_single_match_for_buy_order(
+    self: &mut ActiveOrderBook, price: u64, size: u64
+): ActiveMatchedOrder {
+    let (smallest_key, smallest_value) = self.sells.borrow_front();
+    assert!(price >= smallest_key.price, EINTERNAL_INVARIANT_BROKEN);
+    single_match_with_current_active_order(
+        size,
+        smallest_key,
+        *smallest_value,
+        &mut self.sells
+    )
+}
+
+ + + +
+ + + +## Function `get_single_match_for_sell_order` + + + +
fun get_single_match_for_sell_order(self: &mut active_order_book::ActiveOrderBook, price: u64, size: u64): order_book_types::ActiveMatchedOrder
+
+ + + +
+Implementation + + +
fun get_single_match_for_sell_order(
+    self: &mut ActiveOrderBook, price: u64, size: u64
+): ActiveMatchedOrder {
+    let (largest_key, largest_value) = self.buys.borrow_back();
+    assert!(price <= largest_key.price, EINTERNAL_INVARIANT_BROKEN);
+    single_match_with_current_active_order(
+        size,
+        largest_key,
+        *largest_value,
+        &mut self.buys
+    )
+}
+
+ + + +
+ + + +## Function `get_single_match_result` + + + +
public fun get_single_match_result(self: &mut active_order_book::ActiveOrderBook, price: u64, size: u64, is_buy: bool): order_book_types::ActiveMatchedOrder
+
+ + + +
+Implementation + + +
public fun get_single_match_result(
+    self: &mut ActiveOrderBook,
+    price: u64,
+    size: u64,
+    is_buy: bool
+): ActiveMatchedOrder {
+    if (is_buy) {
+        self.get_single_match_for_buy_order(price, size)
+    } else {
+        self.get_single_match_for_sell_order(price, size)
+    }
+}
+
+ + + +
+ + + +## Function `increase_order_size` + +Increase the size of the order in the orderbook without altering its position in the price-time priority. + + +
public fun increase_order_size(self: &mut active_order_book::ActiveOrderBook, price: u64, unique_priority_idx: order_book_types::UniqueIdxType, size_delta: u64, is_buy: bool)
+
+ + + +
+Implementation + + +
public fun increase_order_size(
+    self: &mut ActiveOrderBook,
+    price: u64,
+    unique_priority_idx: UniqueIdxType,
+    size_delta: u64,
+    is_buy: bool
+) {
+    let tie_breaker = get_tie_breaker(unique_priority_idx, is_buy);
+    let key = ActiveBidKey { price, tie_breaker };
+    if (is_buy) {
+        self.buys.borrow_mut(&key).size += size_delta;
+    } else {
+        self.sells.borrow_mut(&key).size += size_delta;
+    };
+}
+
+ + + +
+ + + +## Function `decrease_order_size` + +Decrease the size of the order in the order book without altering its position in the price-time priority. + + +
public fun decrease_order_size(self: &mut active_order_book::ActiveOrderBook, price: u64, unique_priority_idx: order_book_types::UniqueIdxType, size_delta: u64, is_buy: bool)
+
+ + + +
+Implementation + + +
public fun decrease_order_size(
+    self: &mut ActiveOrderBook,
+    price: u64,
+    unique_priority_idx: UniqueIdxType,
+    size_delta: u64,
+    is_buy: bool
+) {
+    let tie_breaker = get_tie_breaker(unique_priority_idx, is_buy);
+    let key = ActiveBidKey { price, tie_breaker };
+    if (is_buy) {
+        self.buys.borrow_mut(&key).size -= size_delta;
+    } else {
+        self.sells.borrow_mut(&key).size -= size_delta;
+    };
+}
+
+ + + +
+ + + +## Function `place_maker_order` + + + +
public fun place_maker_order(self: &mut active_order_book::ActiveOrderBook, order_id: order_book_types::OrderIdType, price: u64, unique_priority_idx: order_book_types::UniqueIdxType, size: u64, is_buy: bool)
+
+ + + +
+Implementation + + +
public fun place_maker_order(
+    self: &mut ActiveOrderBook,
+    order_id: OrderIdType,
+    price: u64,
+    unique_priority_idx: UniqueIdxType,
+    size: u64,
+    is_buy: bool
+) {
+    let tie_breaker = get_tie_breaker(unique_priority_idx, is_buy);
+    let key = ActiveBidKey { price, tie_breaker };
+    let value = ActiveBidData { order_id, size };
+    // Assert that this is not a taker order
+    assert!(!self.is_taker_order(price, is_buy), EINVALID_MAKER_ORDER);
+    if (is_buy) {
+        self.buys.add(key, value);
+    } else {
+        self.sells.add(key, value);
+    };
+}
+
+ + + +
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/aptos-experimental/doc/benchmark_utils.md b/aptos-move/framework/aptos-experimental/doc/benchmark_utils.md new file mode 100644 index 0000000000000..828f397918990 --- /dev/null +++ b/aptos-move/framework/aptos-experimental/doc/benchmark_utils.md @@ -0,0 +1,46 @@ + + + +# Module `0x7::benchmark_utils` + + + +- [Function `transfer_and_create_account`](#0x7_benchmark_utils_transfer_and_create_account) + + +
use 0x1::account;
+use 0x1::aptos_account;
+
+
+
+## Function `transfer_and_create_account`
+
+Entry function that creates the account resource and funds the account.
+This makes sure that later transactions don't need to create an account,
+so the actual costs of entry functions can be measured more precisely.
+
+
entry fun transfer_and_create_account(source: &signer, to: address, amount: u64)
+
+ + + +
+Implementation + + +
entry fun transfer_and_create_account(source: &signer, to: address, amount: u64) {
+    account::create_account_if_does_not_exist(to);
+    aptos_account::transfer(source, to, amount);
+}
+
+ + + +
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/aptos-experimental/doc/confidential_asset.md b/aptos-move/framework/aptos-experimental/doc/confidential_asset.md new file mode 100644 index 0000000000000..3d0b0c0c23344 --- /dev/null +++ b/aptos-move/framework/aptos-experimental/doc/confidential_asset.md @@ -0,0 +1,2488 @@ + + + +# Module `0x7::confidential_asset` + +This module implements the Confidential Asset (CA) Standard, a privacy-focused protocol for managing fungible assets (FA). +It enables private transfers by obfuscating token amounts while keeping sender and recipient addresses visible. + + +- [Resource `ConfidentialAssetStore`](#0x7_confidential_asset_ConfidentialAssetStore) +- [Resource `FAController`](#0x7_confidential_asset_FAController) +- [Resource `FAConfig`](#0x7_confidential_asset_FAConfig) +- [Struct `Deposited`](#0x7_confidential_asset_Deposited) +- [Struct `Withdrawn`](#0x7_confidential_asset_Withdrawn) +- [Struct `Transferred`](#0x7_confidential_asset_Transferred) +- [Constants](#@Constants_0) +- [Function `init_module`](#0x7_confidential_asset_init_module) +- [Function `register`](#0x7_confidential_asset_register) +- [Function `deposit_to`](#0x7_confidential_asset_deposit_to) +- [Function `deposit`](#0x7_confidential_asset_deposit) +- [Function `deposit_coins_to`](#0x7_confidential_asset_deposit_coins_to) +- [Function `deposit_coins`](#0x7_confidential_asset_deposit_coins) +- [Function `withdraw_to`](#0x7_confidential_asset_withdraw_to) +- [Function `withdraw`](#0x7_confidential_asset_withdraw) +- [Function `confidential_transfer`](#0x7_confidential_asset_confidential_transfer) +- [Function `rotate_encryption_key`](#0x7_confidential_asset_rotate_encryption_key) +- [Function `normalize`](#0x7_confidential_asset_normalize) +- [Function `freeze_token`](#0x7_confidential_asset_freeze_token) +- [Function `unfreeze_token`](#0x7_confidential_asset_unfreeze_token) +- [Function `rollover_pending_balance`](#0x7_confidential_asset_rollover_pending_balance) +- [Function `rollover_pending_balance_and_freeze`](#0x7_confidential_asset_rollover_pending_balance_and_freeze) +- [Function `rotate_encryption_key_and_unfreeze`](#0x7_confidential_asset_rotate_encryption_key_and_unfreeze) +- [Function `enable_allow_list`](#0x7_confidential_asset_enable_allow_list) +- [Function `disable_allow_list`](#0x7_confidential_asset_disable_allow_list) +- [Function `enable_token`](#0x7_confidential_asset_enable_token) +- [Function `disable_token`](#0x7_confidential_asset_disable_token) +- [Function `set_auditor`](#0x7_confidential_asset_set_auditor) +- [Function `has_confidential_asset_store`](#0x7_confidential_asset_has_confidential_asset_store) +- [Function `is_token_allowed`](#0x7_confidential_asset_is_token_allowed) +- [Function `is_allow_list_enabled`](#0x7_confidential_asset_is_allow_list_enabled) +- [Function `pending_balance`](#0x7_confidential_asset_pending_balance) +- [Function `actual_balance`](#0x7_confidential_asset_actual_balance) +- [Function `encryption_key`](#0x7_confidential_asset_encryption_key) +- [Function `is_normalized`](#0x7_confidential_asset_is_normalized) +- [Function `is_frozen`](#0x7_confidential_asset_is_frozen) +- [Function `get_auditor`](#0x7_confidential_asset_get_auditor) +- [Function `confidential_asset_balance`](#0x7_confidential_asset_confidential_asset_balance) +- [Function `register_internal`](#0x7_confidential_asset_register_internal) +- [Function `deposit_to_internal`](#0x7_confidential_asset_deposit_to_internal) +- 
[Function `withdraw_to_internal`](#0x7_confidential_asset_withdraw_to_internal) +- [Function `confidential_transfer_internal`](#0x7_confidential_asset_confidential_transfer_internal) +- [Function `rotate_encryption_key_internal`](#0x7_confidential_asset_rotate_encryption_key_internal) +- [Function `normalize_internal`](#0x7_confidential_asset_normalize_internal) +- [Function `rollover_pending_balance_internal`](#0x7_confidential_asset_rollover_pending_balance_internal) +- [Function `freeze_token_internal`](#0x7_confidential_asset_freeze_token_internal) +- [Function `unfreeze_token_internal`](#0x7_confidential_asset_unfreeze_token_internal) +- [Function `ensure_fa_config_exists`](#0x7_confidential_asset_ensure_fa_config_exists) +- [Function `get_fa_store_signer`](#0x7_confidential_asset_get_fa_store_signer) +- [Function `get_fa_store_address`](#0x7_confidential_asset_get_fa_store_address) +- [Function `get_user_signer`](#0x7_confidential_asset_get_user_signer) +- [Function `get_user_address`](#0x7_confidential_asset_get_user_address) +- [Function `get_fa_config_signer`](#0x7_confidential_asset_get_fa_config_signer) +- [Function `get_fa_config_address`](#0x7_confidential_asset_get_fa_config_address) +- [Function `construct_user_seed`](#0x7_confidential_asset_construct_user_seed) +- [Function `construct_fa_seed`](#0x7_confidential_asset_construct_fa_seed) +- [Function `validate_auditors`](#0x7_confidential_asset_validate_auditors) +- [Function `deserialize_auditor_eks`](#0x7_confidential_asset_deserialize_auditor_eks) +- [Function `deserialize_auditor_amounts`](#0x7_confidential_asset_deserialize_auditor_amounts) +- [Function `ensure_sufficient_fa`](#0x7_confidential_asset_ensure_sufficient_fa) + + +
use 0x1::bcs;
+use 0x1::chain_id;
+use 0x1::coin;
+use 0x1::dispatchable_fungible_asset;
+use 0x1::error;
+use 0x1::event;
+use 0x1::fungible_asset;
+use 0x1::object;
+use 0x1::option;
+use 0x1::primary_fungible_store;
+use 0x1::ristretto255;
+use 0x1::ristretto255_bulletproofs;
+use 0x1::signer;
+use 0x1::string;
+use 0x1::string_utils;
+use 0x1::system_addresses;
+use 0x1::vector;
+use 0x7::confidential_balance;
+use 0x7::confidential_proof;
+use 0x7::ristretto255_twisted_elgamal;
+
+ + + + + +## Resource `ConfidentialAssetStore` + +The confidential_asset module stores a ConfidentialAssetStore object for each user-token pair. + + +
struct ConfidentialAssetStore has key
+
+ + + +
+Fields + + +
+
+frozen: bool +
+
+ Indicates if the account is frozen. If true, transactions are temporarily disabled + for this account. This is particularly useful during key rotations, which require + two transactions: rolling over the pending balance to the actual balance and rotating + the encryption key. Freezing prevents the user from accepting additional payments + between these two transactions. +
+
+normalized: bool +
+
+ A flag indicating whether the actual balance is normalized. A normalized balance + ensures that all chunks fit within the defined 16-bit bounds, preventing overflows. +
+
+pending_counter: u64 +
+
+ Tracks the maximum number of transactions the user can accept before normalization + is required. For example, if the user can accept up to 2^16 transactions and each + chunk has a 16-bit limit, the maximum chunk value before normalization would be + 2^16 * 2^16 = 2^32. Maintaining this counter is crucial because users must solve + a discrete logarithm problem of this size to decrypt their balances. +
+
+pending_balance: confidential_balance::CompressedConfidentialBalance +
+
+ Stores the user's pending balance, which is used for accepting incoming payments.
+ Represented as four 16-bit chunks (p0 + 2^16 * p1 + 2^32 * p2 + 2^48 * p3), where each chunk can grow up to 32 bits.
+ All payments are accepted into this pending balance, which users must roll over into the actual balance
+ to perform transactions like withdrawals or transfers.
+ This separation helps protect against front-running attacks, where small incoming transfers could force
+ frequent regeneration of zk-proofs.
+
+actual_balance: confidential_balance::CompressedConfidentialBalance +
+
+ Represents the actual user balance, which is available for sending payments.
+ It consists of eight 16-bit chunks (p0 + 2^16 * p1 + ... + 2^112 * p7), supporting a 128-bit balance.
+ Users can decrypt this balance with their decryption keys and by solving a discrete logarithm problem.
+
+ek: ristretto255_twisted_elgamal::CompressedPubkey +
+
+ The encryption key associated with the user's confidential asset account, different for each token. +
+
+ + +
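To make the chunk layout described above concrete, here is a small Rust sketch (plain integers only; the real module stores encrypted chunks, see `confidential_balance`). It splits amounts into 16-bit chunks as in the pending (four-chunk) and actual (eight-chunk) balances, and shows why the pending counter matters: after up to 2^16 incoming payments, a pending chunk can grow to 2^16 * 2^16 = 2^32, which bounds the discrete-log search needed for decryption.

```rust
/// Illustrative sketch of the chunked balance encoding described above.
const CHUNK_BITS: u32 = 16;

/// Split an amount into `n` little-endian 16-bit chunks: amount = sum(chunk[i] << (16*i)).
fn to_chunks(amount: u128, n: usize) -> Vec<u64> {
    (0..n)
        .map(|i| ((amount >> (CHUNK_BITS * i as u32)) & 0xFFFF) as u64)
        .collect()
}

/// Recombine chunks; chunks are allowed to exceed 16 bits before normalization.
fn from_chunks(chunks: &[u64]) -> u128 {
    chunks
        .iter()
        .enumerate()
        .map(|(i, &c)| (c as u128) << (CHUNK_BITS * i as u32))
        .sum()
}

fn main() {
    // Actual balance: eight 16-bit chunks cover a full 128-bit amount.
    let amount: u128 = 0x0123_4567_89AB_CDEF_0011_2233_4455_6677;
    let actual = to_chunks(amount, 8);
    assert_eq!(from_chunks(&actual), amount);

    // Pending balance: four 16-bit chunks (64-bit amounts). Each incoming payment
    // adds chunk-wise, so after up to 2^16 payments a chunk can reach 2^16 * 2^16 = 2^32,
    // which is the bound the pending counter protects (decryption solves a discrete
    // log of roughly that size).
    let mut pending = vec![0u64; 4];
    for _ in 0..3 {
        let deposit = to_chunks(0xFFFF, 4); // worst-case 16-bit chunk value
        for (p, d) in pending.iter_mut().zip(deposit) {
            *p += d; // chunks grow past 16 bits until a rollover/normalization
        }
    }
    assert_eq!(pending[0], 3 * 0xFFFF);
    assert_eq!(from_chunks(&pending), 3 * 0xFFFF);
    println!("ok");
}
```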
+ + + +## Resource `FAController` + +Represents the controller for the primary FA stores and FAConfig objects. + + +
struct FAController has key
+
+ + + +
+Fields + + +
+
+allow_list_enabled: bool +
+
+ Indicates whether the allow list is enabled. If true, only tokens from the allow list can be transferred. + This flag is managed by the governance module. +
+
+extend_ref: object::ExtendRef +
+
+ Used to derive a signer that owns all the FAs' primary stores and FAConfig objects. +
+
+ + +
+ + + +## Resource `FAConfig` + +Represents the configuration of a token. + + +
struct FAConfig has key
+
+ + + +
+Fields + + +
+
+allowed: bool +
+
+ Indicates whether the token is allowed for confidential transfers. + If the allow list is disabled, all tokens are allowed. + This flag can be toggled by the governance module. Withdrawals are always allowed regardless of this flag. +
+
+auditor_ek: option::Option<ristretto255_twisted_elgamal::CompressedPubkey> +
+
+ The auditor's public key for the token. If the auditor is not set, this field is None. + Otherwise, each confidential transfer must include the auditor as an additional party, + alongside the recipient, who has access to the decrypted transferred amount. +
+
+ + +
+ + + +## Struct `Deposited` + +Emitted when tokens are brought into the protocol. + + +
#[event]
+struct Deposited has drop, store
+
+ + + +
+Fields + + +
+
+from: address +
+
+ +
+
+to: address +
+
+ +
+
+amount: u64 +
+
+ +
+
+ + +
+ + + +## Struct `Withdrawn` + +Emitted when tokens are brought out of the protocol. + + +
#[event]
+struct Withdrawn has drop, store
+
+ + + +
+Fields + + +
+
+from: address +
+
+ +
+
+to: address +
+
+ +
+
+amount: u64 +
+
+ +
+
+ + +
+ + + +## Struct `Transferred` + +Emitted when tokens are transferred within the protocol between users' confidential balances. +Note that a numeric amount is not included, as it is hidden. + + +
#[event]
+struct Transferred has drop, store
+
+ + + +
+Fields + + +
+
+from: address +
+
+ +
+
+to: address +
+
+ +
+
+ + +
+ + + +## Constants + + + + +An internal error occurred, indicating unexpected behavior. + + +
const EINTERNAL_ERROR: u64 = 16;
+
+ + + + + +The allow list is already disabled. + + +
const EALLOW_LIST_DISABLED: u64 = 15;
+
+ + + + + +The allow list is already enabled. + + +
const EALLOW_LIST_ENABLED: u64 = 14;
+
+ + + + + +The confidential asset account is already frozen. + + +
const EALREADY_FROZEN: u64 = 7;
+
+ + + + + +The balance is already normalized and cannot be normalized again. + + +
const EALREADY_NORMALIZED: u64 = 11;
+
+ + + + + +The deserialization of the auditor EK failed. + + +
const EAUDITOR_EK_DESERIALIZATION_FAILED: u64 = 4;
+
+ + + + + +The confidential asset store has already been published for the given user-token pair. + + +
const ECA_STORE_ALREADY_PUBLISHED: u64 = 2;
+
+ + + + + +The confidential asset store has not been published for the given user-token pair. + + +
const ECA_STORE_NOT_PUBLISHED: u64 = 3;
+
+ + + + + +The provided auditors or auditor proofs are invalid. + + +
const EINVALID_AUDITORS: u64 = 6;
+
+ + + + + +Sender and recipient amounts encrypt different transfer amounts + + +
const EINVALID_SENDER_AMOUNT: u64 = 17;
+
+ + + + + +The operation requires the actual balance to be normalized. + + +
const ENORMALIZATION_REQUIRED: u64 = 10;
+
+ + + + + +The sender is not the registered auditor. + + +
const ENOT_AUDITOR: u64 = 5;
+
+ + + + + +The confidential asset account is not frozen. + + +
const ENOT_FROZEN: u64 = 8;
+
+ + + + + +The pending balance must be zero for this operation. + + +
const ENOT_ZERO_BALANCE: u64 = 9;
+
+ + + + + +The range proof system does not support sufficient range. + + +
const ERANGE_PROOF_SYSTEM_HAS_INSUFFICIENT_RANGE: u64 = 1;
+
+ + + + + +The token is not allowed for confidential transfers. + + +
const ETOKEN_DISABLED: u64 = 13;
+
+ + + + + +The token is already allowed for confidential transfers. + + +
const ETOKEN_ENABLED: u64 = 12;
+
+ + + + + +The mainnet chain ID. If the chain ID is 1, the allow list is enabled. + + +
const MAINNET_CHAIN_ID: u8 = 1;
+
+ + + + + +The maximum number of transactions that can be aggregated into the pending balance before a rollover is required. + + +
const MAX_TRANSFERS_BEFORE_ROLLOVER: u64 = 65534;
+
+ + + + + +## Function `init_module` + + + +
fun init_module(deployer: &signer)
+
+ + + +
+Implementation + + +
fun init_module(deployer: &signer) {
+    assert!(
+        bulletproofs::get_max_range_bits() >= confidential_proof::get_bulletproofs_num_bits(),
+        error::internal(ERANGE_PROOF_SYSTEM_HAS_INSUFFICIENT_RANGE)
+    );
+
+    let deployer_address = signer::address_of(deployer);
+
+    let fa_controller_ctor_ref = &object::create_object(deployer_address);
+
+    move_to(deployer, FAController {
+        allow_list_enabled: chain_id::get() == MAINNET_CHAIN_ID,
+        extend_ref: object::generate_extend_ref(fa_controller_ctor_ref),
+    });
+}
+
+ + + +
+ + + +## Function `register` + +Registers an account for a specified token. Users must register an account for each token they +intend to transact with. + +Users are also responsible for generating a Twisted ElGamal key pair on their side. + + +
public entry fun register(sender: &signer, token: object::Object<fungible_asset::Metadata>, ek: vector<u8>)
+
+ + + +
+Implementation + + +
public entry fun register(
+    sender: &signer,
+    token: Object<Metadata>,
+    ek: vector<u8>) acquires FAController, FAConfig
+{
+    let ek = twisted_elgamal::new_pubkey_from_bytes(ek).extract();
+
+    register_internal(sender, token, ek);
+}
+
+ + + +
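As a rough sketch of the client flow (assumptions: `ek_bytes` is a 32-byte Twisted ElGamal public key generated off-chain by the user's wallet, and `token_addr` is the metadata object address of the FA):

```move
script {
    use 0x1::object;
    use 0x1::fungible_asset::Metadata;
    use 0x7::confidential_asset;

    // Registers the sender for confidential transfers of the given token.
    // `ek_bytes` must be a valid 32-byte Twisted ElGamal public key produced off-chain.
    fun register_for_token(sender: &signer, token_addr: address, ek_bytes: vector<u8>) {
        let token = object::address_to_object<Metadata>(token_addr);
        confidential_asset::register(sender, token, ek_bytes);
    }
}
```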
+ + + +## Function `deposit_to` + +Brings tokens into the protocol, transferring the passed amount from the sender's primary FA store +to the pending balance of the recipient. +The initial confidential balance is publicly visible, as entering the protocol requires a normal transfer. +However, tokens within the protocol become obfuscated through confidential transfers, ensuring privacy in +subsequent transactions. + + +
public entry fun deposit_to(sender: &signer, token: object::Object<fungible_asset::Metadata>, to: address, amount: u64)
+
+ + + +
+Implementation + + +
public entry fun deposit_to(
+    sender: &signer,
+    token: Object<Metadata>,
+    to: address,
+    amount: u64) acquires ConfidentialAssetStore, FAController, FAConfig
+{
+    deposit_to_internal(sender, token, to, amount)
+}
+
+ + + +
+ + + +## Function `deposit` + +The same as deposit_to, but the recipient is the sender. + + +
public entry fun deposit(sender: &signer, token: object::Object<fungible_asset::Metadata>, amount: u64)
+
+ + + +
+Implementation + + +
public entry fun deposit(
+    sender: &signer,
+    token: Object<Metadata>,
+    amount: u64) acquires ConfidentialAssetStore, FAController, FAConfig
+{
+    deposit_to_internal(sender, token, signer::address_of(sender), amount)
+}
+
+ + + +
+ + + + + + + +## Function `deposit_coins_to` + +The same as deposit_to, but first converts coins into the paired FA if the sender's FA balance is insufficient. + + +
public entry fun deposit_coins_to<CoinType>(sender: &signer, to: address, amount: u64)
+
+ + + +
+Implementation + + +
public entry fun deposit_coins_to<CoinType>(
+    sender: &signer,
+    to: address,
+    amount: u64) acquires ConfidentialAssetStore, FAController, FAConfig
+{
+    let token = ensure_sufficient_fa<CoinType>(sender, amount).extract();
+
+    deposit_to_internal(sender, token, to, amount)
+}
+
+ + + +
+ + + + + + + +## Function `deposit_coins` + +The same as deposit, but first converts coins into the paired FA if the sender's FA balance is insufficient. + + +
public entry fun deposit_coins<CoinType>(sender: &signer, amount: u64)
+
+ + + +
+Implementation + + +
public entry fun deposit_coins<CoinType>(
+    sender: &signer,
+    amount: u64) acquires ConfidentialAssetStore, FAController, FAConfig
+{
+    let token = ensure_sufficient_fa<CoinType>(sender, amount).extract();
+
+    deposit_to_internal(sender, token, signer::address_of(sender), amount)
+}
+
+ + + +
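A minimal sketch of a coin deposit (assumption: AptosCoin is used here purely as an example coin type); the call converts just enough coin into its paired FA and then credits the sender's own pending balance:

```move
script {
    use 0x7::confidential_asset;

    // Deposits 100 units of AptosCoin into the sender's own pending balance,
    // converting coin into the paired FA first if the FA balance is insufficient.
    fun deposit_100(sender: &signer) {
        confidential_asset::deposit_coins<0x1::aptos_coin::AptosCoin>(sender, 100);
    }
}
```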
+ + + +## Function `withdraw_to` + +Brings tokens out of the protocol by transferring the specified amount from the sender's actual balance to +the recipient's primary FA store. +The withdrawn amount is publicly visible, as this process requires a normal transfer. +The sender provides their new normalized confidential balance, encrypted with fresh randomness to preserve privacy. + + +
public entry fun withdraw_to(sender: &signer, token: object::Object<fungible_asset::Metadata>, to: address, amount: u64, new_balance: vector<u8>, zkrp_new_balance: vector<u8>, sigma_proof: vector<u8>)
+
+ + + +
+Implementation + + +
public entry fun withdraw_to(
+    sender: &signer,
+    token: Object<Metadata>,
+    to: address,
+    amount: u64,
+    new_balance: vector<u8>,
+    zkrp_new_balance: vector<u8>,
+    sigma_proof: vector<u8>) acquires ConfidentialAssetStore, FAController
+{
+    let new_balance = confidential_balance::new_actual_balance_from_bytes(new_balance).extract();
+    let proof = confidential_proof::deserialize_withdrawal_proof(sigma_proof, zkrp_new_balance).extract();
+
+    withdraw_to_internal(sender, token, to, amount, new_balance, proof);
+
+    event::emit(Withdrawn { from: signer::address_of(sender), to, amount });
+}
+
+ + + +
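A sketch of the call shape (assumption: `new_balance`, `zkrp_new_balance`, and `sigma_proof` are produced off-chain by the sender's client, which knows the decryption key and re-encrypts the remaining balance with fresh randomness):

```move
script {
    use 0x1::object;
    use 0x1::fungible_asset::Metadata;
    use 0x7::confidential_asset;

    // Withdraws `amount` from the sender's actual confidential balance to the
    // recipient's primary FA store. All proof bytes are generated off-chain.
    fun withdraw_confidential(
        sender: &signer,
        token_addr: address,
        recipient: address,
        amount: u64,
        new_balance: vector<u8>,
        zkrp_new_balance: vector<u8>,
        sigma_proof: vector<u8>
    ) {
        let token = object::address_to_object<Metadata>(token_addr);
        confidential_asset::withdraw_to(
            sender, token, recipient, amount, new_balance, zkrp_new_balance, sigma_proof
        );
    }
}
```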
+ + + +## Function `withdraw` + +The same as withdraw_to, but the recipient is the sender. + + +
public entry fun withdraw(sender: &signer, token: object::Object<fungible_asset::Metadata>, amount: u64, new_balance: vector<u8>, zkrp_new_balance: vector<u8>, sigma_proof: vector<u8>)
+
+ + + +
+Implementation + + +
public entry fun withdraw(
+    sender: &signer,
+    token: Object<Metadata>,
+    amount: u64,
+    new_balance: vector<u8>,
+    zkrp_new_balance: vector<u8>,
+    sigma_proof: vector<u8>) acquires ConfidentialAssetStore, FAController
+{
+    withdraw_to(
+        sender,
+        token,
+        signer::address_of(sender),
+        amount,
+        new_balance,
+        zkrp_new_balance,
+        sigma_proof
+    )
+}
+
+ + + +
+ + + + + + + +## Function `confidential_transfer` + +Transfers tokens from the sender's actual balance to the recipient's pending balance. +The function hides the transferred amount while keeping the sender and recipient addresses visible. +The sender encrypts the transferred amount with the recipient's encryption key, and the function updates the +recipient's confidential balance homomorphically. +Additionally, the sender encrypts the transferred amount with the auditors' EKs, allowing auditors to decrypt +it on their side. +The sender provides their new normalized confidential balance, encrypted with fresh randomness to preserve privacy. +Warning: If the auditor feature is enabled, the sender must include the auditor as the first element in the +auditor_eks vector. + + +
public entry fun confidential_transfer(sender: &signer, token: object::Object<fungible_asset::Metadata>, to: address, new_balance: vector<u8>, sender_amount: vector<u8>, recipient_amount: vector<u8>, auditor_eks: vector<u8>, auditor_amounts: vector<u8>, zkrp_new_balance: vector<u8>, zkrp_transfer_amount: vector<u8>, sigma_proof: vector<u8>)
+
+ + + +
+Implementation + + +
public entry fun confidential_transfer(
+    sender: &signer,
+    token: Object<Metadata>,
+    to: address,
+    new_balance: vector<u8>,
+    sender_amount: vector<u8>,
+    recipient_amount: vector<u8>,
+    auditor_eks: vector<u8>,
+    auditor_amounts: vector<u8>,
+    zkrp_new_balance: vector<u8>,
+    zkrp_transfer_amount: vector<u8>,
+    sigma_proof: vector<u8>) acquires ConfidentialAssetStore, FAConfig, FAController
+{
+    let new_balance = confidential_balance::new_actual_balance_from_bytes(new_balance).extract();
+    let sender_amount = confidential_balance::new_pending_balance_from_bytes(sender_amount).extract();
+    let recipient_amount = confidential_balance::new_pending_balance_from_bytes(recipient_amount).extract();
+    let auditor_eks = deserialize_auditor_eks(auditor_eks).extract();
+    let auditor_amounts = deserialize_auditor_amounts(auditor_amounts).extract();
+    let proof = confidential_proof::deserialize_transfer_proof(
+        sigma_proof,
+        zkrp_new_balance,
+        zkrp_transfer_amount
+    ).extract();
+
+    confidential_transfer_internal(
+        sender,
+        token,
+        to,
+        new_balance,
+        sender_amount,
+        recipient_amount,
+        auditor_eks,
+        auditor_amounts,
+        proof
+    )
+}
+
+ + + +
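The call shape, as a sketch (assumptions: all ciphertext and proof bytes are produced off-chain; if the token has an asset-specific auditor, its EK must be the first 32-byte entry in `auditor_eks`):

```move
script {
    use 0x1::object;
    use 0x1::fungible_asset::Metadata;
    use 0x7::confidential_asset;

    // Sends a hidden amount from the sender's actual balance to `recipient`'s
    // pending balance. `auditor_eks` is a concatenation of 32-byte keys and
    // `auditor_amounts` a concatenation of 256-byte pending-balance ciphertexts.
    fun transfer_confidential(
        sender: &signer,
        token_addr: address,
        recipient: address,
        new_balance: vector<u8>,
        sender_amount: vector<u8>,
        recipient_amount: vector<u8>,
        auditor_eks: vector<u8>,
        auditor_amounts: vector<u8>,
        zkrp_new_balance: vector<u8>,
        zkrp_transfer_amount: vector<u8>,
        sigma_proof: vector<u8>
    ) {
        let token = object::address_to_object<Metadata>(token_addr);
        confidential_asset::confidential_transfer(
            sender, token, recipient, new_balance, sender_amount, recipient_amount,
            auditor_eks, auditor_amounts, zkrp_new_balance, zkrp_transfer_amount, sigma_proof
        );
    }
}
```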
+ + + +## Function `rotate_encryption_key` + +Rotates the encryption key for the user's confidential balance, updating it to a new encryption key. +The function ensures that the pending balance is zero before the key rotation, requiring the sender to +call rollover_pending_balance_and_freeze beforehand if necessary. +The sender provides their new normalized confidential balance, encrypted with the new encryption key and fresh randomness +to preserve privacy. + + +
public entry fun rotate_encryption_key(sender: &signer, token: object::Object<fungible_asset::Metadata>, new_ek: vector<u8>, new_balance: vector<u8>, zkrp_new_balance: vector<u8>, sigma_proof: vector<u8>)
+
+ + + +
+Implementation + + +
public entry fun rotate_encryption_key(
+    sender: &signer,
+    token: Object<Metadata>,
+    new_ek: vector<u8>,
+    new_balance: vector<u8>,
+    zkrp_new_balance: vector<u8>,
+    sigma_proof: vector<u8>) acquires ConfidentialAssetStore
+{
+    let new_ek = twisted_elgamal::new_pubkey_from_bytes(new_ek).extract();
+    let new_balance = confidential_balance::new_actual_balance_from_bytes(new_balance).extract();
+    let proof = confidential_proof::deserialize_rotation_proof(sigma_proof, zkrp_new_balance).extract();
+
+    rotate_encryption_key_internal(sender, token, new_ek, new_balance, proof);
+}
+
+ + + +
+ + + + + + + +## Function `normalize` + +Adjusts each chunk to fit within the defined 16-bit bounds to prevent overflows. +Most functions perform implicit normalization by accepting a new normalized confidential balance as a parameter. +However, explicit normalization is required before rolling over the pending balance, as repeated rollovers may cause +chunk overflows. +The sender provides their new normalized confidential balance, encrypted with fresh randomness to preserve privacy. + + +
public entry fun normalize(sender: &signer, token: object::Object<fungible_asset::Metadata>, new_balance: vector<u8>, zkrp_new_balance: vector<u8>, sigma_proof: vector<u8>)
+
+ + + +
+Implementation + + +
public entry fun normalize(
+    sender: &signer,
+    token: Object<Metadata>,
+    new_balance: vector<u8>,
+    zkrp_new_balance: vector<u8>,
+    sigma_proof: vector<u8>) acquires ConfidentialAssetStore
+{
+    let new_balance = confidential_balance::new_actual_balance_from_bytes(new_balance).extract();
+    let proof = confidential_proof::deserialize_normalization_proof(sigma_proof, zkrp_new_balance).extract();
+
+    normalize_internal(sender, token, new_balance, proof);
+}
+
+ + + +
+ + + +## Function `freeze_token` + +Freezes the confidential account for the specified token, disabling all incoming transactions. + + +
public entry fun freeze_token(sender: &signer, token: object::Object<fungible_asset::Metadata>)
+
+ + + +
+Implementation + + +
public entry fun freeze_token(sender: &signer, token: Object<Metadata>) acquires ConfidentialAssetStore {
+    freeze_token_internal(sender, token);
+}
+
+ + + +
+ + + +## Function `unfreeze_token` + +Unfreezes the confidential account for the specified token, re-enabling incoming transactions. + + +
public entry fun unfreeze_token(sender: &signer, token: object::Object<fungible_asset::Metadata>)
+
+ + + +
+Implementation + + +
public entry fun unfreeze_token(sender: &signer, token: Object<Metadata>) acquires ConfidentialAssetStore {
+    unfreeze_token_internal(sender, token);
+}
+
+ + + +
+ + + +## Function `rollover_pending_balance` + +Adds the pending balance to the actual balance for the specified token, resetting the pending balance to zero. +This operation is necessary to use tokens from the pending balance for outgoing transactions. + + +
public entry fun rollover_pending_balance(sender: &signer, token: object::Object<fungible_asset::Metadata>)
+
+ + + +
+Implementation + + +
public entry fun rollover_pending_balance(
+    sender: &signer,
+    token: Object<Metadata>) acquires ConfidentialAssetStore
+{
+    rollover_pending_balance_internal(sender, token);
+}
+
+ + + +
+ + + + + + + +## Function `rollover_pending_balance_and_freeze` + +Before calling rotate_encryption_key, we need to roll over the pending balance and freeze the token to prevent +any new payments from coming in. + + +
public entry fun rollover_pending_balance_and_freeze(sender: &signer, token: object::Object<fungible_asset::Metadata>)
+
+ + + +
+Implementation + + +
public entry fun rollover_pending_balance_and_freeze(
+    sender: &signer,
+    token: Object<Metadata>) acquires ConfidentialAssetStore
+{
+    rollover_pending_balance(sender, token);
+    freeze_token(sender, token);
+}
+
+ + + +
+ + + +## Function `rotate_encryption_key_and_unfreeze` + +After rotating the encryption key, we may want to unfreeze the token to allow payments. +This function facilitates making both calls in a single transaction. + + +
public entry fun rotate_encryption_key_and_unfreeze(sender: &signer, token: object::Object<fungible_asset::Metadata>, new_ek: vector<u8>, new_confidential_balance: vector<u8>, zkrp_new_balance: vector<u8>, rotate_proof: vector<u8>)
+
+ + + +
+Implementation + + +
public entry fun rotate_encryption_key_and_unfreeze(
+    sender: &signer,
+    token: Object<Metadata>,
+    new_ek: vector<u8>,
+    new_confidential_balance: vector<u8>,
+    zkrp_new_balance: vector<u8>,
+    rotate_proof: vector<u8>) acquires ConfidentialAssetStore
+{
+    rotate_encryption_key(sender, token, new_ek, new_confidential_balance, zkrp_new_balance, rotate_proof);
+    unfreeze_token(sender, token);
+}
+
+ + + +
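A sketch of the documented two-transaction rotation flow, shown as two separate scripts, one per transaction (assumption: between the two transactions the client decrypts the rolled-over balance off-chain and produces the rotation proof under the new key):

```move
// Transaction 1: fold the pending balance into the actual balance and freeze
// the account so no new payments arrive while the key changes.
script {
    use 0x1::object;
    use 0x1::fungible_asset::Metadata;
    use 0x7::confidential_asset;

    fun rotation_step_1(sender: &signer, token_addr: address) {
        let token = object::address_to_object<Metadata>(token_addr);
        confidential_asset::rollover_pending_balance_and_freeze(sender, token);
    }
}

// Transaction 2: install the new key and unfreeze. `new_ek`, `new_balance`,
// `zkrp_new_balance` and `rotate_proof` are produced off-chain under the new key.
script {
    use 0x1::object;
    use 0x1::fungible_asset::Metadata;
    use 0x7::confidential_asset;

    fun rotation_step_2(
        sender: &signer,
        token_addr: address,
        new_ek: vector<u8>,
        new_balance: vector<u8>,
        zkrp_new_balance: vector<u8>,
        rotate_proof: vector<u8>
    ) {
        let token = object::address_to_object<Metadata>(token_addr);
        confidential_asset::rotate_encryption_key_and_unfreeze(
            sender, token, new_ek, new_balance, zkrp_new_balance, rotate_proof
        );
    }
}
```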
+ + + +## Function `enable_allow_list` + +Enables the allow list, restricting confidential transfers to tokens on the allow list. + + +
public fun enable_allow_list(aptos_framework: &signer)
+
+ + + +
+Implementation + + +
public fun enable_allow_list(aptos_framework: &signer) acquires FAController {
+    system_addresses::assert_aptos_framework(aptos_framework);
+
+    let fa_controller = borrow_global_mut<FAController>(@aptos_experimental);
+
+    assert!(!fa_controller.allow_list_enabled, error::invalid_state(EALLOW_LIST_ENABLED));
+
+    fa_controller.allow_list_enabled = true;
+}
+
+ + + +
+ + + +## Function `disable_allow_list` + +Disables the allow list, allowing confidential transfers for all tokens. + + +
public fun disable_allow_list(aptos_framework: &signer)
+
+ + + +
+Implementation + + +
public fun disable_allow_list(aptos_framework: &signer) acquires FAController {
+    system_addresses::assert_aptos_framework(aptos_framework);
+
+    let fa_controller = borrow_global_mut<FAController>(@aptos_experimental);
+
+    assert!(fa_controller.allow_list_enabled, error::invalid_state(EALLOW_LIST_DISABLED));
+
+    fa_controller.allow_list_enabled = false;
+}
+
+ + + +
+ + + +## Function `enable_token` + +Enables confidential transfers for the specified token. + + +
public fun enable_token(aptos_framework: &signer, token: object::Object<fungible_asset::Metadata>)
+
+ + + +
+Implementation + + +
public fun enable_token(aptos_framework: &signer, token: Object<Metadata>) acquires FAConfig, FAController {
+    system_addresses::assert_aptos_framework(aptos_framework);
+
+    let fa_config = borrow_global_mut<FAConfig>(ensure_fa_config_exists(token));
+
+    assert!(!fa_config.allowed, error::invalid_state(ETOKEN_ENABLED));
+
+    fa_config.allowed = true;
+}
+
+ + + +
+ + + +## Function `disable_token` + +Disables confidential transfers for the specified token. + + +
public fun disable_token(aptos_framework: &signer, token: object::Object<fungible_asset::Metadata>)
+
+ + + +
+Implementation + + +
public fun disable_token(aptos_framework: &signer, token: Object<Metadata>) acquires FAConfig, FAController {
+    system_addresses::assert_aptos_framework(aptos_framework);
+
+    let fa_config = borrow_global_mut<FAConfig>(ensure_fa_config_exists(token));
+
+    assert!(fa_config.allowed, error::invalid_state(ETOKEN_DISABLED));
+
+    fa_config.allowed = false;
+}
+
+ + + +
+ + + +## Function `set_auditor` + +Sets the auditor's public key for the specified token. + + +
public fun set_auditor(aptos_framework: &signer, token: object::Object<fungible_asset::Metadata>, new_auditor_ek: vector<u8>)
+
+ + + +
+Implementation + + +
public fun set_auditor(
+    aptos_framework: &signer,
+    token: Object<Metadata>,
+    new_auditor_ek: vector<u8>) acquires FAConfig, FAController
+{
+    system_addresses::assert_aptos_framework(aptos_framework);
+
+    let fa_config = borrow_global_mut<FAConfig>(ensure_fa_config_exists(token));
+
+    fa_config.auditor_ek = if (new_auditor_ek.length() == 0) {
+        std::option::none()
+    } else {
+        let new_auditor_ek = twisted_elgamal::new_pubkey_from_bytes(new_auditor_ek);
+        assert!(new_auditor_ek.is_some(), error::invalid_argument(EAUDITOR_EK_DESERIALIZATION_FAILED));
+        new_auditor_ek
+    };
+}
+
+ + + +
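A sketch of how governance might wire these calls together (assumptions: the `framework` signer for @0x1 is obtained through the governance process, and the helper below is hypothetical, not part of this module):

```move
module 0x7::confidential_asset_governance_example {
    use 0x1::fungible_asset::Metadata;
    use 0x1::object::Object;
    use 0x7::confidential_asset;

    // Allows `token` for confidential transfers and installs (or clears) its auditor.
    // An empty `auditor_ek` clears the auditor; a 32-byte Twisted ElGamal EK sets it.
    public fun allow_and_set_auditor(
        framework: &signer,
        token: Object<Metadata>,
        auditor_ek: vector<u8>
    ) {
        confidential_asset::enable_token(framework, token);
        confidential_asset::set_auditor(framework, token, auditor_ek);
    }
}
```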
+ + + +## Function `has_confidential_asset_store` + +Checks if the user has a confidential asset store for the specified token. + + +
#[view]
+public fun has_confidential_asset_store(user: address, token: object::Object<fungible_asset::Metadata>): bool
+
+ + + +
+Implementation + + +
public fun has_confidential_asset_store(user: address, token: Object<Metadata>): bool {
+    exists<ConfidentialAssetStore>(get_user_address(user, token))
+}
+
+ + + +
+ + + +## Function `is_token_allowed` + +Checks if the token is allowed for confidential transfers. + + +
#[view]
+public fun is_token_allowed(token: object::Object<fungible_asset::Metadata>): bool
+
+ + + +
+Implementation + + +
public fun is_token_allowed(token: Object<Metadata>): bool acquires FAController, FAConfig {
+    if (!is_allow_list_enabled()) {
+        return true
+    };
+
+    let fa_config_address = get_fa_config_address(token);
+
+    if (!exists<FAConfig>(fa_config_address)) {
+        return false
+    };
+
+    borrow_global<FAConfig>(fa_config_address).allowed
+}
+
+ + + +
+ + + +## Function `is_allow_list_enabled` + +Checks if the allow list is enabled. +If the allow list is enabled, only tokens from the allow list can be transferred. +Otherwise, all tokens are allowed. + + +
#[view]
+public fun is_allow_list_enabled(): bool
+
+ + + +
+Implementation + + +
public fun is_allow_list_enabled(): bool acquires FAController {
+    borrow_global<FAController>(@aptos_experimental).allow_list_enabled
+}
+
+ + + +
+ + + +## Function `pending_balance` + +Returns the pending balance of the user for the specified token. + + +
#[view]
+public fun pending_balance(owner: address, token: object::Object<fungible_asset::Metadata>): confidential_balance::CompressedConfidentialBalance
+
+ + + +
+Implementation + + +
public fun pending_balance(
+    owner: address,
+    token: Object<Metadata>): confidential_balance::CompressedConfidentialBalance acquires ConfidentialAssetStore
+{
+    assert!(has_confidential_asset_store(owner, token), error::not_found(ECA_STORE_NOT_PUBLISHED));
+
+    let ca_store = borrow_global<ConfidentialAssetStore>(get_user_address(owner, token));
+
+    ca_store.pending_balance
+}
+
+ + + +
+ + + +## Function `actual_balance` + +Returns the actual balance of the user for the specified token. + + +
#[view]
+public fun actual_balance(owner: address, token: object::Object<fungible_asset::Metadata>): confidential_balance::CompressedConfidentialBalance
+
+ + + +
+Implementation + + +
public fun actual_balance(
+    owner: address,
+    token: Object<Metadata>): confidential_balance::CompressedConfidentialBalance acquires ConfidentialAssetStore
+{
+    assert!(has_confidential_asset_store(owner, token), error::not_found(ECA_STORE_NOT_PUBLISHED));
+
+    let ca_store = borrow_global<ConfidentialAssetStore>(get_user_address(owner, token));
+
+    ca_store.actual_balance
+}
+
+ + + +
+ + + +## Function `encryption_key` + +Returns the encryption key (EK) of the user for the specified token. + + +
#[view]
+public fun encryption_key(user: address, token: object::Object<fungible_asset::Metadata>): ristretto255_twisted_elgamal::CompressedPubkey
+
+ + + +
+Implementation + + +
public fun encryption_key(
+    user: address,
+    token: Object<Metadata>): twisted_elgamal::CompressedPubkey acquires ConfidentialAssetStore
+{
+    assert!(has_confidential_asset_store(user, token), error::not_found(ECA_STORE_NOT_PUBLISHED));
+
+    borrow_global_mut<ConfidentialAssetStore>(get_user_address(user, token)).ek
+}
+
+ + + +
+ + + +## Function `is_normalized` + +Checks if the user's actual balance is normalized for the specified token. + + +
#[view]
+public fun is_normalized(user: address, token: object::Object<fungible_asset::Metadata>): bool
+
+ + + +
+Implementation + + +
public fun is_normalized(user: address, token: Object<Metadata>): bool acquires ConfidentialAssetStore {
+    assert!(has_confidential_asset_store(user, token), error::not_found(ECA_STORE_NOT_PUBLISHED));
+
+    borrow_global<ConfidentialAssetStore>(get_user_address(user, token)).normalized
+}
+
+ + + +
+ + + +## Function `is_frozen` + +Checks if the user's confidential asset store is frozen for the specified token. + + +
#[view]
+public fun is_frozen(user: address, token: object::Object<fungible_asset::Metadata>): bool
+
+ + + +
+Implementation + + +
public fun is_frozen(user: address, token: Object<Metadata>): bool acquires ConfidentialAssetStore {
+    assert!(has_confidential_asset_store(user, token), error::not_found(ECA_STORE_NOT_PUBLISHED));
+
+    borrow_global<ConfidentialAssetStore>(get_user_address(user, token)).frozen
+}
+
+ + + +
+ + + +## Function `get_auditor` + +Returns the asset-specific auditor's encryption key. +If the auditing feature is disabled for the token, the encryption key is set to None. + + +
#[view]
+public fun get_auditor(token: object::Object<fungible_asset::Metadata>): option::Option<ristretto255_twisted_elgamal::CompressedPubkey>
+
+ + + +
+Implementation + + +
public fun get_auditor(
+    token: Object<Metadata>): Option<twisted_elgamal::CompressedPubkey> acquires FAConfig, FAController
+{
+    let fa_config_address = get_fa_config_address(token);
+
+    if (!is_allow_list_enabled() && !exists<FAConfig>(fa_config_address)) {
+        return std::option::none();
+    };
+
+    borrow_global<FAConfig>(fa_config_address).auditor_ek
+}
+
+ + + +
+ + + +## Function `confidential_asset_balance` + +Returns the circulating supply of the confidential asset. + + +
#[view]
+public fun confidential_asset_balance(token: object::Object<fungible_asset::Metadata>): u64
+
+ + + +
+Implementation + + +
public fun confidential_asset_balance(token: Object<Metadata>): u64 acquires FAController {
+    let fa_store_address = get_fa_store_address();
+    assert!(primary_fungible_store::primary_store_exists(fa_store_address, token), EINTERNAL_ERROR);
+
+    primary_fungible_store::balance(fa_store_address, token)
+}
+
+ + + +
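Putting a few of the view functions above together, a hypothetical pre-flight check another module or client might run before initiating a confidential transfer could look like this (not part of this module):

```move
module 0x7::confidential_asset_preflight_example {
    use 0x1::fungible_asset::Metadata;
    use 0x1::object::Object;
    use 0x7::confidential_asset;

    // Returns true only if `recipient` can currently accept a confidential
    // transfer of `token`: the token is allowed, the recipient is registered,
    // and their account is not frozen.
    public fun can_receive(recipient: address, token: Object<Metadata>): bool {
        confidential_asset::is_token_allowed(token)
            && confidential_asset::has_confidential_asset_store(recipient, token)
            && !confidential_asset::is_frozen(recipient, token)
    }
}
```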
+ + + +## Function `register_internal` + +Implementation of the register entry function. + + +
public fun register_internal(sender: &signer, token: object::Object<fungible_asset::Metadata>, ek: ristretto255_twisted_elgamal::CompressedPubkey)
+
+ + + +
+Implementation + + +
public fun register_internal(
+    sender: &signer,
+    token: Object<Metadata>,
+    ek: twisted_elgamal::CompressedPubkey) acquires FAController, FAConfig
+{
+    assert!(is_token_allowed(token), error::invalid_argument(ETOKEN_DISABLED));
+
+    let user = signer::address_of(sender);
+
+    assert!(!has_confidential_asset_store(user, token), error::already_exists(ECA_STORE_ALREADY_PUBLISHED));
+
+    let ca_store = ConfidentialAssetStore {
+        frozen: false,
+        normalized: true,
+        pending_counter: 0,
+        pending_balance: confidential_balance::new_compressed_pending_balance_no_randomness(),
+        actual_balance: confidential_balance::new_compressed_actual_balance_no_randomness(),
+        ek,
+    };
+
+    move_to(&get_user_signer(sender, token), ca_store);
+}
+
+ + + +
+ + + +## Function `deposit_to_internal` + +Implementation of the deposit_to entry function. + + +
public fun deposit_to_internal(sender: &signer, token: object::Object<fungible_asset::Metadata>, to: address, amount: u64)
+
+ + + +
+Implementation + + +
public fun deposit_to_internal(
+    sender: &signer,
+    token: Object<Metadata>,
+    to: address,
+    amount: u64) acquires ConfidentialAssetStore, FAController, FAConfig
+{
+    assert!(is_token_allowed(token), error::invalid_argument(ETOKEN_DISABLED));
+    assert!(!is_frozen(to, token), error::invalid_state(EALREADY_FROZEN));
+
+    let from = signer::address_of(sender);
+
+    let sender_fa_store = primary_fungible_store::ensure_primary_store_exists(from, token);
+    let ca_fa_store = primary_fungible_store::ensure_primary_store_exists(get_fa_store_address(), token);
+
+    dispatchable_fungible_asset::transfer(sender, sender_fa_store, ca_fa_store, amount);
+
+    let ca_store = borrow_global_mut<ConfidentialAssetStore>(get_user_address(to, token));
+    let pending_balance = confidential_balance::decompress_balance(&ca_store.pending_balance);
+
+    confidential_balance::add_balances_mut(
+        &mut pending_balance,
+        &confidential_balance::new_pending_balance_u64_no_randonmess(amount)
+    );
+
+    ca_store.pending_balance = confidential_balance::compress_balance(&pending_balance);
+
+    assert!(
+        ca_store.pending_counter < MAX_TRANSFERS_BEFORE_ROLLOVER,
+        error::invalid_argument(EINTERNAL_ERROR)
+    );
+
+    ca_store.pending_counter += 1;
+
+    event::emit(Deposited { from, to, amount });
+}
+
+ + + +
+ + + +## Function `withdraw_to_internal` + +Implementation of the withdraw_to entry function. +Withdrawals are always allowed, regardless of the token allow status. + + +
public fun withdraw_to_internal(sender: &signer, token: object::Object<fungible_asset::Metadata>, to: address, amount: u64, new_balance: confidential_balance::ConfidentialBalance, proof: confidential_proof::WithdrawalProof)
+
+ + + +
+Implementation + + +
public fun withdraw_to_internal(
+    sender: &signer,
+    token: Object<Metadata>,
+    to: address,
+    amount: u64,
+    new_balance: confidential_balance::ConfidentialBalance,
+    proof: WithdrawalProof) acquires ConfidentialAssetStore, FAController
+{
+    let from = signer::address_of(sender);
+
+    let sender_ek = encryption_key(from, token);
+
+    let ca_store = borrow_global_mut<ConfidentialAssetStore>(get_user_address(from, token));
+    let current_balance = confidential_balance::decompress_balance(&ca_store.actual_balance);
+
+    confidential_proof::verify_withdrawal_proof(&sender_ek, amount, &current_balance, &new_balance, &proof);
+
+    ca_store.normalized = true;
+    ca_store.actual_balance = confidential_balance::compress_balance(&new_balance);
+
+    primary_fungible_store::transfer(&get_fa_store_signer(), token, to, amount);
+}
+
+ + + +
+ + + +## Function `confidential_transfer_internal` + +Implementation of the confidential_transfer entry function. + + +
public fun confidential_transfer_internal(sender: &signer, token: object::Object<fungible_asset::Metadata>, to: address, new_balance: confidential_balance::ConfidentialBalance, sender_amount: confidential_balance::ConfidentialBalance, recipient_amount: confidential_balance::ConfidentialBalance, auditor_eks: vector<ristretto255_twisted_elgamal::CompressedPubkey>, auditor_amounts: vector<confidential_balance::ConfidentialBalance>, proof: confidential_proof::TransferProof)
+
+ + + +
+Implementation + + +
public fun confidential_transfer_internal(
+    sender: &signer,
+    token: Object<Metadata>,
+    to: address,
+    new_balance: confidential_balance::ConfidentialBalance,
+    sender_amount: confidential_balance::ConfidentialBalance,
+    recipient_amount: confidential_balance::ConfidentialBalance,
+    auditor_eks: vector<twisted_elgamal::CompressedPubkey>,
+    auditor_amounts: vector<confidential_balance::ConfidentialBalance>,
+    proof: TransferProof) acquires ConfidentialAssetStore, FAConfig, FAController
+{
+    assert!(is_token_allowed(token), error::invalid_argument(ETOKEN_DISABLED));
+    assert!(!is_frozen(to, token), error::invalid_state(EALREADY_FROZEN));
+    assert!(
+        validate_auditors(token, &recipient_amount, &auditor_eks, &auditor_amounts, &proof),
+        error::invalid_argument(EINVALID_AUDITORS)
+    );
+    assert!(
+        confidential_balance::balance_c_equals(&sender_amount, &recipient_amount),
+        error::invalid_argument(EINVALID_SENDER_AMOUNT)
+    );
+
+    let from = signer::address_of(sender);
+
+    let sender_ek = encryption_key(from, token);
+    let recipient_ek = encryption_key(to, token);
+
+    let sender_ca_store = borrow_global_mut<ConfidentialAssetStore>(get_user_address(from, token));
+
+    let sender_current_actual_balance = confidential_balance::decompress_balance(
+        &sender_ca_store.actual_balance
+    );
+
+    confidential_proof::verify_transfer_proof(
+        &sender_ek,
+        &recipient_ek,
+        &sender_current_actual_balance,
+        &new_balance,
+        &sender_amount,
+        &recipient_amount,
+        &auditor_eks,
+        &auditor_amounts,
+        &proof);
+
+    sender_ca_store.normalized = true;
+    sender_ca_store.actual_balance = confidential_balance::compress_balance(&new_balance);
+
+    // Cannot create multiple mutable references to the same type, so we need to drop it
+    let ConfidentialAssetStore { .. } = sender_ca_store;
+
+    let recipient_ca_store = borrow_global_mut<ConfidentialAssetStore>(get_user_address(to, token));
+
+    assert!(
+        recipient_ca_store.pending_counter < MAX_TRANSFERS_BEFORE_ROLLOVER,
+        error::invalid_argument(EINTERNAL_ERROR)
+    );
+
+    let recipient_pending_balance = confidential_balance::decompress_balance(
+        &recipient_ca_store.pending_balance
+    );
+    confidential_balance::add_balances_mut(&mut recipient_pending_balance, &recipient_amount);
+
+    recipient_ca_store.pending_counter += 1;
+    recipient_ca_store.pending_balance = confidential_balance::compress_balance(&recipient_pending_balance);
+
+    event::emit(Transferred { from, to });
+}
+
+ + + +
+ + + +## Function `rotate_encryption_key_internal` + +Implementation of the rotate_encryption_key entry function. + + +
public fun rotate_encryption_key_internal(sender: &signer, token: object::Object<fungible_asset::Metadata>, new_ek: ristretto255_twisted_elgamal::CompressedPubkey, new_balance: confidential_balance::ConfidentialBalance, proof: confidential_proof::RotationProof)
+
+ + + +
+Implementation + + +
public fun rotate_encryption_key_internal(
+    sender: &signer,
+    token: Object<Metadata>,
+    new_ek: twisted_elgamal::CompressedPubkey,
+    new_balance: confidential_balance::ConfidentialBalance,
+    proof: RotationProof) acquires ConfidentialAssetStore
+{
+    let user = signer::address_of(sender);
+    let current_ek = encryption_key(user, token);
+
+    let ca_store = borrow_global_mut<ConfidentialAssetStore>(get_user_address(user, token));
+
+    let pending_balance = confidential_balance::decompress_balance(&ca_store.pending_balance);
+
+    // We need to ensure that the pending balance is zero before rotating the key.
+    // To guarantee this, the user must call `rollover_pending_balance_and_freeze` beforehand.
+    assert!(confidential_balance::is_zero_balance(&pending_balance), error::invalid_state(ENOT_ZERO_BALANCE));
+
+    let current_balance = confidential_balance::decompress_balance(&ca_store.actual_balance);
+
+    confidential_proof::verify_rotation_proof(&current_ek, &new_ek, &current_balance, &new_balance, &proof);
+
+    ca_store.ek = new_ek;
+    // We don't need to update the pending balance here, as it has been asserted to be zero.
+    ca_store.actual_balance = confidential_balance::compress_balance(&new_balance);
+    ca_store.normalized = true;
+}
+
+ + + +
+ + + +## Function `normalize_internal` + +Implementation of the normalize entry function. + + +
public fun normalize_internal(sender: &signer, token: object::Object<fungible_asset::Metadata>, new_balance: confidential_balance::ConfidentialBalance, proof: confidential_proof::NormalizationProof)
+
+ + + +
+Implementation + + +
public fun normalize_internal(
+    sender: &signer,
+    token: Object<Metadata>,
+    new_balance: confidential_balance::ConfidentialBalance,
+    proof: NormalizationProof) acquires ConfidentialAssetStore
+{
+    let user = signer::address_of(sender);
+    let sender_ek = encryption_key(user, token);
+
+    let ca_store = borrow_global_mut<ConfidentialAssetStore>(get_user_address(user, token));
+
+    assert!(!ca_store.normalized, error::invalid_state(EALREADY_NORMALIZED));
+
+    let current_balance = confidential_balance::decompress_balance(&ca_store.actual_balance);
+
+    confidential_proof::verify_normalization_proof(&sender_ek, &current_balance, &new_balance, &proof);
+
+    ca_store.actual_balance = confidential_balance::compress_balance(&new_balance);
+    ca_store.normalized = true;
+}
+
+ + + +
+ + + +## Function `rollover_pending_balance_internal` + +Implementation of the rollover_pending_balance entry function. + + +
public fun rollover_pending_balance_internal(sender: &signer, token: object::Object<fungible_asset::Metadata>)
+
+ + + +
+Implementation + + +
public fun rollover_pending_balance_internal(
+    sender: &signer,
+    token: Object<Metadata>) acquires ConfidentialAssetStore
+{
+    let user = signer::address_of(sender);
+
+    assert!(has_confidential_asset_store(user, token), error::not_found(ECA_STORE_NOT_PUBLISHED));
+
+    let ca_store = borrow_global_mut<ConfidentialAssetStore>(get_user_address(user, token));
+
+    assert!(ca_store.normalized, error::invalid_state(ENORMALIZATION_REQUIRED));
+
+    let actual_balance = confidential_balance::decompress_balance(&ca_store.actual_balance);
+    let pending_balance = confidential_balance::decompress_balance(&ca_store.pending_balance);
+
+    confidential_balance::add_balances_mut(&mut actual_balance, &pending_balance);
+
+    ca_store.normalized = false;
+    ca_store.pending_counter = 0;
+    ca_store.actual_balance = confidential_balance::compress_balance(&actual_balance);
+    ca_store.pending_balance = confidential_balance::new_compressed_pending_balance_no_randomness();
+}
+
+ + + +
+ + + +## Function `freeze_token_internal` + +Implementation of the freeze_token entry function. + + +
public fun freeze_token_internal(sender: &signer, token: object::Object<fungible_asset::Metadata>)
+
+ + + +
+Implementation + + +
public fun freeze_token_internal(
+    sender: &signer,
+    token: Object<Metadata>) acquires ConfidentialAssetStore
+{
+    let user = signer::address_of(sender);
+
+    assert!(has_confidential_asset_store(user, token), error::not_found(ECA_STORE_NOT_PUBLISHED));
+
+    let ca_store = borrow_global_mut<ConfidentialAssetStore>(get_user_address(user, token));
+
+    assert!(!ca_store.frozen, error::invalid_state(EALREADY_FROZEN));
+
+    ca_store.frozen = true;
+}
+
+ + + +
+ + + +## Function `unfreeze_token_internal` + +Implementation of the unfreeze_token entry function. + + +
public fun unfreeze_token_internal(sender: &signer, token: object::Object<fungible_asset::Metadata>)
+
+ + + +
+Implementation + + +
public fun unfreeze_token_internal(
+    sender: &signer,
+    token: Object<Metadata>) acquires ConfidentialAssetStore
+{
+    let user = signer::address_of(sender);
+
+    assert!(has_confidential_asset_store(user, token), error::not_found(ECA_STORE_NOT_PUBLISHED));
+
+    let ca_store = borrow_global_mut<ConfidentialAssetStore>(get_user_address(user, token));
+
+    assert!(ca_store.frozen, error::invalid_state(ENOT_FROZEN));
+
+    ca_store.frozen = false;
+}
+
+ + + +
+ + + + + + + +## Function `ensure_fa_config_exists` + +Ensures that the FAConfig object exists for the specified token, creating it if necessary. +Used only for internal purposes. + + +
fun ensure_fa_config_exists(token: object::Object<fungible_asset::Metadata>): address
+
+ + + +
+Implementation + + +
fun ensure_fa_config_exists(token: Object<Metadata>): address acquires FAController {
+    let fa_config_address = get_fa_config_address(token);
+
+    if (!exists<FAConfig>(fa_config_address)) {
+        let fa_config_singer = get_fa_config_signer(token);
+
+        move_to(&fa_config_singer, FAConfig {
+            allowed: false,
+            auditor_ek: std::option::none(),
+        });
+    };
+
+    fa_config_address
+}
+
+ + + +
+ + + + + + + +## Function `get_fa_store_signer` + +Derives a signer for the object that owns all the FA primary stores. + + +
fun get_fa_store_signer(): signer
+
+ + + +
+Implementation + + +
fun get_fa_store_signer(): signer acquires FAController {
+    object::generate_signer_for_extending(&borrow_global<FAController>(@aptos_experimental).extend_ref)
+}
+
+ + + +
+ + + +## Function `get_fa_store_address` + +Returns the address that handles all the FA primary stores. + + +
fun get_fa_store_address(): address
+
+ + + +
+Implementation + + +
fun get_fa_store_address(): address acquires FAController {
+    object::address_from_extend_ref(&borrow_global<FAController>(@aptos_experimental).extend_ref)
+}
+
+ + + +
+ + + + + + + +## Function `get_user_signer` + +Creates the named object that will hold the user's ConfidentialAssetStore and returns a signer for it. + + +
fun get_user_signer(user: &signer, token: object::Object<fungible_asset::Metadata>): signer
+
+ + + +
+Implementation + + +
fun get_user_signer(user: &signer, token: Object<Metadata>): signer {
+    let user_ctor = &object::create_named_object(user, construct_user_seed(token));
+
+    object::generate_signer(user_ctor)
+}
+
+ + + +
+ + + +## Function `get_user_address` + +Returns the address that handles the user's ConfidentialAssetStore object for the specified user and token. + + +
fun get_user_address(user: address, token: object::Object<fungible_asset::Metadata>): address
+
+ + + +
+Implementation + + +
fun get_user_address(user: address, token: Object<Metadata>): address {
+    object::create_object_address(&user, construct_user_seed(token))
+}
+
+ + + +
+ + + + + + + +## Function `get_fa_config_signer` + +Creates the named object that will hold the token's FAConfig and returns a signer for it. + + +
fun get_fa_config_signer(token: object::Object<fungible_asset::Metadata>): signer
+
+ + + +
+Implementation + + +
fun get_fa_config_signer(token: Object<Metadata>): signer acquires FAController {
+    let fa_ext = &borrow_global<FAController>(@aptos_experimental).extend_ref;
+    let fa_ext_signer = object::generate_signer_for_extending(fa_ext);
+
+    let fa_ctor = &object::create_named_object(&fa_ext_signer, construct_fa_seed(token));
+
+    object::generate_signer(fa_ctor)
+}
+
+ + + +
+ + + +## Function `get_fa_config_address` + +Returns the address that handles primary FA store and FAConfig objects for the specified token. + + +
fun get_fa_config_address(token: object::Object<fungible_asset::Metadata>): address
+
+ + + +
+Implementation + + +
fun get_fa_config_address(token: Object<Metadata>): address acquires FAController {
+    let fa_ext = &borrow_global<FAController>(@aptos_experimental).extend_ref;
+    let fa_ext_address = object::address_from_extend_ref(fa_ext);
+
+    object::create_object_address(&fa_ext_address, construct_fa_seed(token))
+}
+
+ + + +
+ + + + + + + +## Function `construct_user_seed` + +Constructs a unique seed for the user's ConfidentialAssetStore object. +As all ConfidentialAssetStore objects have the same type, they are differentiated by this per-token seed. + + +
fun construct_user_seed(token: object::Object<fungible_asset::Metadata>): vector<u8>
+
+ + + +
+Implementation + + +
fun construct_user_seed(token: Object<Metadata>): vector<u8> {
+    bcs::to_bytes(
+        &string_utils::format2(
+            &b"confidential_asset::{}::token::{}::user",
+            @aptos_experimental,
+            object::object_address(&token)
+        )
+    )
+}
+
+ + + +
+ + + + + + + +## Function `construct_fa_seed` + +Constructs a unique seed for the FA's FAConfig object. +As all FAConfig objects have the same type, they are differentiated by this per-token seed. + + +
fun construct_fa_seed(token: object::Object<fungible_asset::Metadata>): vector<u8>
+
+ + + +
+Implementation + + +
fun construct_fa_seed(token: Object<Metadata>): vector<u8> {
+    bcs::to_bytes(
+        &string_utils::format2(
+            &b"confidential_asset::{}::token::{}::fa",
+            @aptos_experimental,
+            object::object_address(&token)
+        )
+    )
+}
+
+ + + +
+ + + +## Function `validate_auditors` + +Validates that the auditor-related fields in the confidential transfer are correct. +Returns false if the transfer amount is not the same as the auditor amounts. +Returns false if the number of auditors in the transfer proof and auditor lists do not match. +Returns false if the first auditor in the list and the asset-specific auditor do not match. +Note: If the asset-specific auditor is not set, the validation is successful for any list of auditors. +Otherwise, returns true. + + +
fun validate_auditors(token: object::Object<fungible_asset::Metadata>, transfer_amount: &confidential_balance::ConfidentialBalance, auditor_eks: &vector<ristretto255_twisted_elgamal::CompressedPubkey>, auditor_amounts: &vector<confidential_balance::ConfidentialBalance>, proof: &confidential_proof::TransferProof): bool
+
+ + + +
+Implementation + + +
fun validate_auditors(
+    token: Object<Metadata>,
+    transfer_amount: &confidential_balance::ConfidentialBalance,
+    auditor_eks: &vector<twisted_elgamal::CompressedPubkey>,
+    auditor_amounts: &vector<confidential_balance::ConfidentialBalance>,
+    proof: &TransferProof): bool acquires FAConfig, FAController
+{
+    if (
+        !auditor_amounts.all(|auditor_amount| {
+            confidential_balance::balance_c_equals(transfer_amount, auditor_amount)
+        })
+    ) {
+        return false
+    };
+
+    if (
+        auditor_eks.length() != auditor_amounts.length() ||
+            auditor_eks.length() != confidential_proof::auditors_count_in_transfer_proof(proof)
+    ) {
+        return false
+    };
+
+    let asset_auditor_ek = get_auditor(token);
+    if (asset_auditor_ek.is_none()) {
+        return true
+    };
+
+    if (auditor_eks.length() == 0) {
+        return false
+    };
+
+    let asset_auditor_ek = twisted_elgamal::pubkey_to_point(&asset_auditor_ek.extract());
+    let first_auditor_ek = twisted_elgamal::pubkey_to_point(&auditor_eks[0]);
+
+    ristretto255::point_equals(&asset_auditor_ek, &first_auditor_ek)
+}
+
+ + + +
+ + + +## Function `deserialize_auditor_eks` + +Deserializes the auditor EKs from a byte array. +Returns Some(vector<twisted_elgamal::CompressedPubkey>) if the deserialization is successful, otherwise None. + + +
fun deserialize_auditor_eks(auditor_eks_bytes: vector<u8>): option::Option<vector<ristretto255_twisted_elgamal::CompressedPubkey>>
+
+ + + +
+Implementation + + +
fun deserialize_auditor_eks(
+    auditor_eks_bytes: vector<u8>): Option<vector<twisted_elgamal::CompressedPubkey>>
+{
+    if (auditor_eks_bytes.length() % 32 != 0) {
+        return std::option::none()
+    };
+
+    let auditors_count = auditor_eks_bytes.length() / 32;
+
+    let auditor_eks = vector::range(0, auditors_count).map(|i| {
+        twisted_elgamal::new_pubkey_from_bytes(auditor_eks_bytes.slice(i * 32, (i + 1) * 32))
+    });
+
+    if (auditor_eks.any(|ek| ek.is_none())) {
+        return std::option::none()
+    };
+
+    std::option::some(auditor_eks.map(|ek| ek.extract()))
+}
+
+ + + +
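For reference, the expected wire format is simply 32-byte keys concatenated back to back. A hypothetical client-side helper (not part of this module) mirroring that layout:

```move
module 0x7::auditor_eks_example {
    use std::vector;

    // Concatenates a list of 32-byte auditor EKs into the byte layout that
    // deserialize_auditor_eks expects on the other side.
    public fun serialize_auditor_eks(eks: vector<vector<u8>>): vector<u8> {
        let out = vector::empty<u8>();
        let i = 0;
        while (i < vector::length(&eks)) {
            let ek = *vector::borrow(&eks, i);
            assert!(vector::length(&ek) == 32, 0);
            vector::append(&mut out, ek);
            i = i + 1;
        };
        out
    }
}
```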
+ + + +## Function `deserialize_auditor_amounts` + +Deserializes the auditor amounts from a byte array. +Returns Some(vector<confidential_balance::ConfidentialBalance>) if the deserialization is successful, otherwise None. + + +
fun deserialize_auditor_amounts(auditor_amounts_bytes: vector<u8>): option::Option<vector<confidential_balance::ConfidentialBalance>>
+
+ + + +
+Implementation + + +
fun deserialize_auditor_amounts(
+    auditor_amounts_bytes: vector<u8>): Option<vector<confidential_balance::ConfidentialBalance>>
+{
+    if (auditor_amounts_bytes.length() % 256 != 0) {
+        return std::option::none()
+    };
+
+    let auditors_count = auditor_amounts_bytes.length() / 256;
+
+    let auditor_amounts = vector::range(0, auditors_count).map(|i| {
+        confidential_balance::new_pending_balance_from_bytes(auditor_amounts_bytes.slice(i * 256, (i + 1) * 256))
+    });
+
+    if (auditor_amounts.any(|ek| ek.is_none())) {
+        return std::option::none()
+    };
+
+    std::option::some(auditor_amounts.map(|balance| balance.extract()))
+}
+
+ + + +
+ + + + + + + +## Function `ensure_sufficient_fa` + +Converts coins into the missing FA if needed. +Returns Some(Object<Metadata>) if the user has a sufficient amount of FA to proceed, otherwise None. + + +
fun ensure_sufficient_fa<CoinType>(sender: &signer, amount: u64): option::Option<object::Object<fungible_asset::Metadata>>
+
+ + + +
+Implementation + + +
fun ensure_sufficient_fa<CoinType>(sender: &signer, amount: u64): Option<Object<Metadata>> {
+    let user = signer::address_of(sender);
+    let fa = coin::paired_metadata<CoinType>();
+
+    if (fa.is_none()) {
+        return fa;
+    };
+
+    let fa_balance = primary_fungible_store::balance(user, *fa.borrow());
+
+    if (fa_balance >= amount) {
+        return fa;
+    };
+
+    if (coin::balance<CoinType>(user) < amount) {
+        return std::option::none();
+    };
+
+    let coin_amount = coin::withdraw<CoinType>(sender, amount - fa_balance);
+    let fa_amount = coin::coin_to_fungible_asset(coin_amount);
+
+    primary_fungible_store::deposit(user, fa_amount);
+
+    fa
+}
+
+ + + +
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/aptos-experimental/doc/confidential_balance.md b/aptos-move/framework/aptos-experimental/doc/confidential_balance.md new file mode 100644 index 0000000000000..30bf118e09704 --- /dev/null +++ b/aptos-move/framework/aptos-experimental/doc/confidential_balance.md @@ -0,0 +1,823 @@ + + + +# Module `0x7::confidential_balance` + +This module implements a Confidential Balance abstraction, built on top of Twisted ElGamal encryption, +over the Ristretto255 curve. + +The Confidential Balance encapsulates encrypted representations of a balance, split into chunks and stored as pairs of +ciphertext components (C_i, D_i) under basepoints G and H and an encryption key P = dk^(-1) * H, where dk +is the corresponding decryption key. Each pair represents an encrypted value a_i - the i-th 16-bit portion of +the total encrypted amount - and its associated randomness r_i, such that C_i = a_i * G + r_i * H and D_i = r_i * P. + +The module supports two types of balances: +- Pending balances are represented by four ciphertext pairs (C_i, D_i), i = 1..4, suitable for 64-bit values. +- Actual balances are represented by eight ciphertext pairs (C_i, D_i), i = 1..8, capable of handling 128-bit values. + +This implementation leverages the homomorphic properties of Twisted ElGamal encryption to allow arithmetic operations +directly on encrypted data. + + +- [Struct `CompressedConfidentialBalance`](#0x7_confidential_balance_CompressedConfidentialBalance) +- [Struct `ConfidentialBalance`](#0x7_confidential_balance_ConfidentialBalance) +- [Constants](#@Constants_0) +- [Function `new_pending_balance_no_randomness`](#0x7_confidential_balance_new_pending_balance_no_randomness) +- [Function `new_actual_balance_no_randomness`](#0x7_confidential_balance_new_actual_balance_no_randomness) +- [Function `new_compressed_pending_balance_no_randomness`](#0x7_confidential_balance_new_compressed_pending_balance_no_randomness) +- [Function `new_compressed_actual_balance_no_randomness`](#0x7_confidential_balance_new_compressed_actual_balance_no_randomness) +- [Function `new_pending_balance_u64_no_randonmess`](#0x7_confidential_balance_new_pending_balance_u64_no_randonmess) +- [Function `new_pending_balance_from_bytes`](#0x7_confidential_balance_new_pending_balance_from_bytes) +- [Function `new_actual_balance_from_bytes`](#0x7_confidential_balance_new_actual_balance_from_bytes) +- [Function `compress_balance`](#0x7_confidential_balance_compress_balance) +- [Function `decompress_balance`](#0x7_confidential_balance_decompress_balance) +- [Function `balance_to_bytes`](#0x7_confidential_balance_balance_to_bytes) +- [Function `balance_to_points_c`](#0x7_confidential_balance_balance_to_points_c) +- [Function `balance_to_points_d`](#0x7_confidential_balance_balance_to_points_d) +- [Function `add_balances_mut`](#0x7_confidential_balance_add_balances_mut) +- [Function `sub_balances_mut`](#0x7_confidential_balance_sub_balances_mut) +- [Function `balance_equals`](#0x7_confidential_balance_balance_equals) +- [Function `balance_c_equals`](#0x7_confidential_balance_balance_c_equals) +- [Function `is_zero_balance`](#0x7_confidential_balance_is_zero_balance) +- [Function `split_into_chunks_u64`](#0x7_confidential_balance_split_into_chunks_u64) +- [Function `split_into_chunks_u128`](#0x7_confidential_balance_split_into_chunks_u128) +- [Function `get_pending_balance_chunks`](#0x7_confidential_balance_get_pending_balance_chunks) +- [Function 
`get_actual_balance_chunks`](#0x7_confidential_balance_get_actual_balance_chunks) +- [Function `get_chunk_size_bits`](#0x7_confidential_balance_get_chunk_size_bits) + + +
use 0x1::error;
+use 0x1::option;
+use 0x1::ristretto255;
+use 0x1::vector;
+use 0x7::ristretto255_twisted_elgamal;
+
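A minimal, test-style sketch of the homomorphic property described above (assumption: the helpers used below are publicly callable as their documented signatures suggest):

```move
#[test_only]
module 0x7::confidential_balance_example {
    use 0x7::confidential_balance;

    // Adding encryptions of 5 and 7 (with no randomness) yields an encryption of 12.
    #[test]
    fun homomorphic_add() {
        let sum = confidential_balance::new_pending_balance_u64_no_randonmess(5);
        let addend = confidential_balance::new_pending_balance_u64_no_randonmess(7);
        confidential_balance::add_balances_mut(&mut sum, &addend);

        let expected = confidential_balance::new_pending_balance_u64_no_randonmess(12);
        assert!(confidential_balance::balance_equals(&sum, &expected), 0);
    }
}
```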
+ + + + + +## Struct `CompressedConfidentialBalance` + +Represents a compressed confidential balance, where each chunk is a compressed Twisted ElGamal ciphertext. + + +
struct CompressedConfidentialBalance has copy, drop, store
+
+ + + +
+Fields + + +
+
+chunks: vector<ristretto255_twisted_elgamal::CompressedCiphertext> +
+
+ +
+
+ + +
+ + + +## Struct `ConfidentialBalance` + +Represents a confidential balance, where each chunk is a Twisted ElGamal ciphertext. + + +
struct ConfidentialBalance has drop
+
+ + + +
+Fields + + +
+
+chunks: vector<ristretto255_twisted_elgamal::Ciphertext> +
+
+ +
+
+ + +
+ + + +## Constants + + + + +The number of chunks in an actual balance. + + +
const ACTUAL_BALANCE_CHUNKS: u64 = 8;
+
+ + + + + +The number of bits in a single chunk. + + +
const CHUNK_SIZE_BITS: u64 = 16;
+
+ + + + + +An internal error occurred, indicating unexpected behavior. + + +
const EINTERNAL_ERROR: u64 = 1;
+
+ + + + + +The number of chunks in a pending balance. + + +
const PENDING_BALANCE_CHUNKS: u64 = 4;
+
+ + + + + +## Function `new_pending_balance_no_randomness` + +Creates a new zero pending balance, where each chunk is set to zero Twisted ElGamal ciphertext. + + +
public fun new_pending_balance_no_randomness(): confidential_balance::ConfidentialBalance
+
+ + + +
+Implementation + + +
public fun new_pending_balance_no_randomness(): ConfidentialBalance {
+    ConfidentialBalance {
+        chunks: vector::range(0, PENDING_BALANCE_CHUNKS).map(|_| {
+            twisted_elgamal::ciphertext_from_points(ristretto255::point_identity(), ristretto255::point_identity())
+        })
+    }
+}
+
+ + + +
+ + + +## Function `new_actual_balance_no_randomness` + +Creates a new zero actual balance, where each chunk is set to zero Twisted ElGamal ciphertext. + + +
public fun new_actual_balance_no_randomness(): confidential_balance::ConfidentialBalance
+
+ + + +
+Implementation + + +
public fun new_actual_balance_no_randomness(): ConfidentialBalance {
+    ConfidentialBalance {
+        chunks: vector::range(0, ACTUAL_BALANCE_CHUNKS).map(|_| {
+            twisted_elgamal::ciphertext_from_points(ristretto255::point_identity(), ristretto255::point_identity())
+        })
+    }
+}
+
+ + + +
+ + + +## Function `new_compressed_pending_balance_no_randomness` + +Creates a new compressed zero pending balance, where each chunk is set to compressed zero Twisted ElGamal ciphertext. + + +
public fun new_compressed_pending_balance_no_randomness(): confidential_balance::CompressedConfidentialBalance
+
+ + + +
+Implementation + + +
public fun new_compressed_pending_balance_no_randomness(): CompressedConfidentialBalance {
+    CompressedConfidentialBalance {
+        chunks: vector::range(0, PENDING_BALANCE_CHUNKS).map(|_| {
+            twisted_elgamal::ciphertext_from_compressed_points(
+                ristretto255::point_identity_compressed(), ristretto255::point_identity_compressed())
+        })
+    }
+}
+
+ + + +
+ + + +## Function `new_compressed_actual_balance_no_randomness` + +Creates a new compressed zero actual balance, where each chunk is set to compressed zero Twisted ElGamal ciphertext. + + +
public fun new_compressed_actual_balance_no_randomness(): confidential_balance::CompressedConfidentialBalance
+
+ + + +
+Implementation + + +
public fun new_compressed_actual_balance_no_randomness(): CompressedConfidentialBalance {
+    CompressedConfidentialBalance {
+        chunks: vector::range(0, ACTUAL_BALANCE_CHUNKS).map(|_| {
+            twisted_elgamal::ciphertext_from_compressed_points(
+                ristretto255::point_identity_compressed(), ristretto255::point_identity_compressed())
+        })
+    }
+}
+
+ + + +
+ + + +## Function `new_pending_balance_u64_no_randonmess` + +Creates a new pending balance from a 64-bit amount with no randomness, splitting the amount into four 16-bit chunks. + + +
public fun new_pending_balance_u64_no_randonmess(amount: u64): confidential_balance::ConfidentialBalance
+
+ + + +
+Implementation + + +
public fun new_pending_balance_u64_no_randonmess(amount: u64): ConfidentialBalance {
+    ConfidentialBalance {
+        chunks: split_into_chunks_u64(amount).map(|chunk| {
+            twisted_elgamal::new_ciphertext_no_randomness(&chunk)
+        })
+    }
+}
+
+ + + +
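A minimal usage sketch (hedged; the test-only wrapper module is an assumption) combining this constructor with the serialization helpers documented below:

```move
#[test_only]
module 0x7::confidential_balance_roundtrip_example {
    use 0x7::confidential_balance;

    #[test]
    fun pending_balance_byte_roundtrip() {
        // Encrypt 42 with no randomness: chunk 0 holds 42, chunks 1..3 hold 0.
        let balance = confidential_balance::new_pending_balance_u64_no_randonmess(42);

        // Each chunk serializes to 64 bytes, so a pending balance serializes to 4 * 64 bytes.
        let bytes = confidential_balance::balance_to_bytes(&balance);
        assert!(bytes.length() == 64 * confidential_balance::get_pending_balance_chunks(), 0);

        // Deserializing those bytes yields an equivalent balance.
        let restored_opt = confidential_balance::new_pending_balance_from_bytes(bytes);
        assert!(restored_opt.is_some(), 1);
        let restored = restored_opt.extract();
        assert!(confidential_balance::balance_equals(&balance, &restored), 2);
    }
}
```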
+ + + +## Function `new_pending_balance_from_bytes` + +Creates a new pending balance from a serialized byte array representation. +Returns Some(ConfidentialBalance) if deserialization succeeds, otherwise None. + + +
public fun new_pending_balance_from_bytes(bytes: vector<u8>): option::Option<confidential_balance::ConfidentialBalance>
+
+ + + +
+Implementation + + +
public fun new_pending_balance_from_bytes(bytes: vector<u8>): Option<ConfidentialBalance> {
+    if (bytes.length() != 64 * PENDING_BALANCE_CHUNKS) {
+        return std::option::none()
+    };
+
+    let chunks = vector::range(0, PENDING_BALANCE_CHUNKS).map(|i| {
+        twisted_elgamal::new_ciphertext_from_bytes(bytes.slice(i * 64, (i + 1) * 64))
+    });
+
+    if (chunks.any(|chunk| chunk.is_none())) {
+        return std::option::none()
+    };
+
+    option::some(ConfidentialBalance {
+        chunks: chunks.map(|chunk| chunk.extract())
+    })
+}
+
+ + + +
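Note that deserialization is all-or-nothing: if the input is not exactly 64 * PENDING_BALANCE_CHUNKS (256) bytes, or if any 64-byte slice fails to decode as a Twisted ElGamal ciphertext, the function returns none rather than a partial balance. new_actual_balance_from_bytes below behaves the same way, expecting 64 * ACTUAL_BALANCE_CHUNKS (512) bytes.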
+ + + +## Function `new_actual_balance_from_bytes` + +Creates a new actual balance from a serialized byte array representation. +Returns Some(ConfidentialBalance) if deserialization succeeds, otherwise None. + + +
public fun new_actual_balance_from_bytes(bytes: vector<u8>): option::Option<confidential_balance::ConfidentialBalance>
+
+ + + +
+Implementation + + +
public fun new_actual_balance_from_bytes(bytes: vector<u8>): Option<ConfidentialBalance> {
+    if (bytes.length() != 64 * ACTUAL_BALANCE_CHUNKS) {
+        return std::option::none()
+    };
+
+    let chunks = vector::range(0, ACTUAL_BALANCE_CHUNKS).map(|i| {
+        twisted_elgamal::new_ciphertext_from_bytes(bytes.slice(i * 64, (i + 1) * 64))
+    });
+
+    if (chunks.any(|chunk| chunk.is_none())) {
+        return std::option::none()
+    };
+
+    option::some(ConfidentialBalance {
+        chunks: chunks.map(|chunk| chunk.extract())
+    })
+}
+
+ + + +
+ + + +## Function `compress_balance` + +Compresses a confidential balance into its CompressedConfidentialBalance representation. + + +
public fun compress_balance(balance: &confidential_balance::ConfidentialBalance): confidential_balance::CompressedConfidentialBalance
+
+ + + +
+Implementation + + +
public fun compress_balance(balance: &ConfidentialBalance): CompressedConfidentialBalance {
+    CompressedConfidentialBalance {
+        chunks: balance.chunks.map_ref(|ciphertext| twisted_elgamal::compress_ciphertext(ciphertext))
+    }
+}
+
+ + + +
+ + + +## Function `decompress_balance` + +Decompresses a compressed confidential balance into its ConfidentialBalance representation. + + +
public fun decompress_balance(balance: &confidential_balance::CompressedConfidentialBalance): confidential_balance::ConfidentialBalance
+
+ + + +
+Implementation + + +
public fun decompress_balance(balance: &CompressedConfidentialBalance): ConfidentialBalance {
+    ConfidentialBalance {
+        chunks: balance.chunks.map_ref(|ciphertext| twisted_elgamal::decompress_ciphertext(ciphertext))
+    }
+}
+
+ + + +
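A small sketch of the compress/decompress pair (hedged; the wrapper module is hypothetical). Since CompressedConfidentialBalance has copy, drop, and store while ConfidentialBalance only has drop, the compressed form is the one that can be kept in resources, and decompression recovers an equivalent working balance:

```move
#[test_only]
module 0x7::confidential_balance_compress_example {
    use 0x7::confidential_balance;

    #[test]
    fun compress_then_decompress_preserves_balance() {
        let balance = confidential_balance::new_pending_balance_u64_no_randonmess(7);
        // `CompressedConfidentialBalance` has `store`, so this is the form that can live in resources.
        let compressed = confidential_balance::compress_balance(&balance);
        // Decompressing yields a balance equivalent to the original.
        let restored = confidential_balance::decompress_balance(&compressed);
        assert!(confidential_balance::balance_equals(&balance, &restored), 0);
    }
}
```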
+ + + +## Function `balance_to_bytes` + +Serializes a confidential balance into a byte array representation. + + +
public fun balance_to_bytes(balance: &confidential_balance::ConfidentialBalance): vector<u8>
+
+ + + +
+Implementation + + +
public fun balance_to_bytes(balance: &ConfidentialBalance): vector<u8> {
+    let bytes = vector<u8>[];
+
+    balance.chunks.for_each_ref(|ciphertext| {
+        bytes.append(twisted_elgamal::ciphertext_to_bytes(ciphertext));
+    });
+
+    bytes
+}
+
+ + + +
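The serialized layout simply concatenates the chunks in order, least significant chunk first; each chunk contributes the 64 bytes produced by ciphertext_to_bytes (two 32-byte compressed Ristretto points), which is exactly the layout new_pending_balance_from_bytes and new_actual_balance_from_bytes expect.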
+ + + +## Function `balance_to_points_c` + +Extracts the C value component (a * H + r * G) of each chunk in a confidential balance as a vector of RistrettoPoints. + + +
public fun balance_to_points_c(balance: &confidential_balance::ConfidentialBalance): vector<ristretto255::RistrettoPoint>
+
+ + + +
+Implementation + + +
public fun balance_to_points_c(balance: &ConfidentialBalance): vector<RistrettoPoint> {
+    balance.chunks.map_ref(|chunk| {
+        let (c, _) = twisted_elgamal::ciphertext_as_points(chunk);
+        ristretto255::point_clone(c)
+    })
+}
+
+ + + +
+ + + +## Function `balance_to_points_d` + +Extracts the D randomness component (r * Y) of each chunk in a confidential balance as a vector of RistrettoPoints. + + +
public fun balance_to_points_d(balance: &confidential_balance::ConfidentialBalance): vector<ristretto255::RistrettoPoint>
+
+ + + +
+Implementation + + +
public fun balance_to_points_d(balance: &ConfidentialBalance): vector<RistrettoPoint> {
+    balance.chunks.map_ref(|chunk| {
+        let (_, d) = twisted_elgamal::ciphertext_as_points(chunk);
+        ristretto255::point_clone(d)
+    })
+}
+
+ + + +
+ + + +## Function `add_balances_mut` + +Adds two confidential balances homomorphically, mutating the first balance in place. +The second balance must have fewer or equal chunks compared to the first. + + +
public fun add_balances_mut(lhs: &mut confidential_balance::ConfidentialBalance, rhs: &confidential_balance::ConfidentialBalance)
+
+ + + +
+Implementation + + +
public fun add_balances_mut(lhs: &mut ConfidentialBalance, rhs: &ConfidentialBalance) {
+    assert!(lhs.chunks.length() >= rhs.chunks.length(), error::internal(EINTERNAL_ERROR));
+
+    lhs.chunks.enumerate_mut(|i, chunk| {
+        if (i < rhs.chunks.length()) {
+            twisted_elgamal::ciphertext_add_assign(chunk, &rhs.chunks[i])
+        }
+    })
+}
+
+ + + +
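A hedged sketch of the homomorphic addition (the test-only wrapper module is an assumption): an actual balance (8 chunks) can absorb a pending amount (4 chunks), because the precondition only requires the left-hand side to have at least as many chunks as the right-hand side:

```move
#[test_only]
module 0x7::confidential_balance_add_example {
    use 0x7::confidential_balance;

    #[test]
    fun add_pending_amount_into_actual_balance() {
        let actual = confidential_balance::new_actual_balance_no_randomness();
        let pending = confidential_balance::new_pending_balance_u64_no_randonmess(100);

        // Chunk-wise ciphertext addition over the first four chunks; the last four are untouched.
        confidential_balance::add_balances_mut(&mut actual, &pending);

        // The result now encrypts 100 in its low chunk, so it is no longer the zero balance.
        assert!(!confidential_balance::is_zero_balance(&actual), 0);
    }
}
```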
+ + + +## Function `sub_balances_mut` + +Subtracts one confidential balance from another homomorphically, mutating the first balance in place. +The second balance must have fewer or equal chunks compared to the first. + + +
public fun sub_balances_mut(lhs: &mut confidential_balance::ConfidentialBalance, rhs: &confidential_balance::ConfidentialBalance)
+
+ + + +
+Implementation + + +
public fun sub_balances_mut(lhs: &mut ConfidentialBalance, rhs: &ConfidentialBalance) {
+    assert!(lhs.chunks.length() >= rhs.chunks.length(), error::internal(EINTERNAL_ERROR));
+
+    lhs.chunks.enumerate_mut(|i, chunk| {
+        if (i < rhs.chunks.length()) {
+            twisted_elgamal::ciphertext_sub_assign(chunk, &rhs.chunks[i])
+        }
+    })
+}
+
+ + + +
+ + + +## Function `balance_equals` + +Checks if two confidential balances are equivalent, including both value and randomness components. + + +
public fun balance_equals(lhs: &confidential_balance::ConfidentialBalance, rhs: &confidential_balance::ConfidentialBalance): bool
+
+ + + +
+Implementation + + +
public fun balance_equals(lhs: &ConfidentialBalance, rhs: &ConfidentialBalance): bool {
+    assert!(lhs.chunks.length() == rhs.chunks.length(), error::internal(EINTERNAL_ERROR));
+
+    let ok = true;
+
+    lhs.chunks.zip_ref(&rhs.chunks, |l, r| {
+        ok = ok && twisted_elgamal::ciphertext_equals(l, r);
+    });
+
+    ok
+}
+
+ + + +
+ + + +## Function `balance_c_equals` + +Checks if the corresponding value components (C) of two confidential balances are equivalent. + + +
public fun balance_c_equals(lhs: &confidential_balance::ConfidentialBalance, rhs: &confidential_balance::ConfidentialBalance): bool
+
+ + + +
+Implementation + + +
public fun balance_c_equals(lhs: &ConfidentialBalance, rhs: &ConfidentialBalance): bool {
+    assert!(lhs.chunks.length() == rhs.chunks.length(), error::internal(EINTERNAL_ERROR));
+
+    let ok = true;
+
+    lhs.chunks.zip_ref(&rhs.chunks, |l, r| {
+        let (lc, _) = twisted_elgamal::ciphertext_as_points(l);
+        let (rc, _) = twisted_elgamal::ciphertext_as_points(r);
+
+        ok = ok && ristretto255::point_equals(lc, rc);
+    });
+
+    ok
+}
+
+ + + +
+ + + +## Function `is_zero_balance` + +Checks if a confidential balance is equivalent to zero, where all chunks are the identity element. + + +
public fun is_zero_balance(balance: &confidential_balance::ConfidentialBalance): bool
+
+ + + +
+Implementation + + +
public fun is_zero_balance(balance: &ConfidentialBalance): bool {
+    balance.chunks.all(|chunk| {
+        twisted_elgamal::ciphertext_equals(
+            chunk,
+            &twisted_elgamal::ciphertext_from_points(ristretto255::point_identity(), ristretto255::point_identity())
+        )
+    })
+}
+
+ + + +
+ + + +## Function `split_into_chunks_u64` + +Splits a 64-bit integer amount into four 16-bit chunks, represented as Scalar values. + + +
public fun split_into_chunks_u64(amount: u64): vector<ristretto255::Scalar>
+
+ + + +
+Implementation + + +
public fun split_into_chunks_u64(amount: u64): vector<Scalar> {
+    vector::range(0, PENDING_BALANCE_CHUNKS).map(|i| {
+        ristretto255::new_scalar_from_u64(amount >> (i * CHUNK_SIZE_BITS as u8) & 0xffff)
+    })
+}
+
+ + + +
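For example, split_into_chunks_u64(0x0001_0002_0003_0004) yields the chunk scalars [0x0004, 0x0003, 0x0002, 0x0001]: chunk i is (amount >> (i * 16)) & 0xffff, so the least significant 16 bits land in chunk 0.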
+ + + +## Function `split_into_chunks_u128` + +Splits a 128-bit integer amount into eight 16-bit chunks, represented as Scalar values. + + +
public fun split_into_chunks_u128(amount: u128): vector<ristretto255::Scalar>
+
+ + + +
+Implementation + + +
public fun split_into_chunks_u128(amount: u128): vector<Scalar> {
+    vector::range(0, ACTUAL_BALANCE_CHUNKS).map(|i| {
+        ristretto255::new_scalar_from_u128(amount >> (i * CHUNK_SIZE_BITS as u8) & 0xffff)
+    })
+}
+
+ + + +
+ + + +## Function `get_pending_balance_chunks` + +Returns the number of chunks in a pending balance. + + +
#[view]
+public fun get_pending_balance_chunks(): u64
+
+ + + +
+Implementation + + +
public fun get_pending_balance_chunks(): u64 {
+    PENDING_BALANCE_CHUNKS
+}
+
+ + + +
+ + + +## Function `get_actual_balance_chunks` + +Returns the number of chunks in an actual balance. + + +
#[view]
+public fun get_actual_balance_chunks(): u64
+
+ + + +
+Implementation + + +
public fun get_actual_balance_chunks(): u64 {
+    ACTUAL_BALANCE_CHUNKS
+}
+
+ + + +
+ + + +## Function `get_chunk_size_bits` + +Returns the number of bits in a single chunk. + + +
#[view]
+public fun get_chunk_size_bits(): u64
+
+ + + +
+Implementation + + +
public fun get_chunk_size_bits(): u64 {
+    CHUNK_SIZE_BITS
+}
+
+ + + +
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/aptos-experimental/doc/confidential_proof.md b/aptos-move/framework/aptos-experimental/doc/confidential_proof.md new file mode 100644 index 0000000000000..9ed1df4102168 --- /dev/null +++ b/aptos-move/framework/aptos-experimental/doc/confidential_proof.md @@ -0,0 +1,2982 @@ + + + +# Module `0x7::confidential_proof` + +The confidential_proof module provides the infrastructure for verifying zero-knowledge proofs used in the Confidential Asset protocol. +These proofs ensure correctness for operations such as confidential_transfer, withdraw, rotate_encryption_key, and normalize. + + +- [Struct `WithdrawalProof`](#0x7_confidential_proof_WithdrawalProof) +- [Struct `TransferProof`](#0x7_confidential_proof_TransferProof) +- [Struct `NormalizationProof`](#0x7_confidential_proof_NormalizationProof) +- [Struct `RotationProof`](#0x7_confidential_proof_RotationProof) +- [Struct `WithdrawalSigmaProofXs`](#0x7_confidential_proof_WithdrawalSigmaProofXs) +- [Struct `WithdrawalSigmaProofAlphas`](#0x7_confidential_proof_WithdrawalSigmaProofAlphas) +- [Struct `WithdrawalSigmaProofGammas`](#0x7_confidential_proof_WithdrawalSigmaProofGammas) +- [Struct `WithdrawalSigmaProof`](#0x7_confidential_proof_WithdrawalSigmaProof) +- [Struct `TransferSigmaProofXs`](#0x7_confidential_proof_TransferSigmaProofXs) +- [Struct `TransferSigmaProofAlphas`](#0x7_confidential_proof_TransferSigmaProofAlphas) +- [Struct `TransferSigmaProofGammas`](#0x7_confidential_proof_TransferSigmaProofGammas) +- [Struct `TransferSigmaProof`](#0x7_confidential_proof_TransferSigmaProof) +- [Struct `NormalizationSigmaProofXs`](#0x7_confidential_proof_NormalizationSigmaProofXs) +- [Struct `NormalizationSigmaProofAlphas`](#0x7_confidential_proof_NormalizationSigmaProofAlphas) +- [Struct `NormalizationSigmaProofGammas`](#0x7_confidential_proof_NormalizationSigmaProofGammas) +- [Struct `NormalizationSigmaProof`](#0x7_confidential_proof_NormalizationSigmaProof) +- [Struct `RotationSigmaProofXs`](#0x7_confidential_proof_RotationSigmaProofXs) +- [Struct `RotationSigmaProofAlphas`](#0x7_confidential_proof_RotationSigmaProofAlphas) +- [Struct `RotationSigmaProofGammas`](#0x7_confidential_proof_RotationSigmaProofGammas) +- [Struct `RotationSigmaProof`](#0x7_confidential_proof_RotationSigmaProof) +- [Constants](#@Constants_0) +- [Function `verify_withdrawal_proof`](#0x7_confidential_proof_verify_withdrawal_proof) +- [Function `verify_transfer_proof`](#0x7_confidential_proof_verify_transfer_proof) +- [Function `verify_normalization_proof`](#0x7_confidential_proof_verify_normalization_proof) +- [Function `verify_rotation_proof`](#0x7_confidential_proof_verify_rotation_proof) +- [Function `verify_withdrawal_sigma_proof`](#0x7_confidential_proof_verify_withdrawal_sigma_proof) +- [Function `verify_transfer_sigma_proof`](#0x7_confidential_proof_verify_transfer_sigma_proof) +- [Function `verify_normalization_sigma_proof`](#0x7_confidential_proof_verify_normalization_sigma_proof) +- [Function `verify_rotation_sigma_proof`](#0x7_confidential_proof_verify_rotation_sigma_proof) +- [Function `verify_new_balance_range_proof`](#0x7_confidential_proof_verify_new_balance_range_proof) +- [Function `verify_transfer_amount_range_proof`](#0x7_confidential_proof_verify_transfer_amount_range_proof) +- [Function `auditors_count_in_transfer_proof`](#0x7_confidential_proof_auditors_count_in_transfer_proof) +- [Function 
`deserialize_withdrawal_proof`](#0x7_confidential_proof_deserialize_withdrawal_proof) +- [Function `deserialize_transfer_proof`](#0x7_confidential_proof_deserialize_transfer_proof) +- [Function `deserialize_normalization_proof`](#0x7_confidential_proof_deserialize_normalization_proof) +- [Function `deserialize_rotation_proof`](#0x7_confidential_proof_deserialize_rotation_proof) +- [Function `deserialize_withdrawal_sigma_proof`](#0x7_confidential_proof_deserialize_withdrawal_sigma_proof) +- [Function `deserialize_transfer_sigma_proof`](#0x7_confidential_proof_deserialize_transfer_sigma_proof) +- [Function `deserialize_normalization_sigma_proof`](#0x7_confidential_proof_deserialize_normalization_sigma_proof) +- [Function `deserialize_rotation_sigma_proof`](#0x7_confidential_proof_deserialize_rotation_sigma_proof) +- [Function `get_fiat_shamir_withdrawal_sigma_dst`](#0x7_confidential_proof_get_fiat_shamir_withdrawal_sigma_dst) +- [Function `get_fiat_shamir_transfer_sigma_dst`](#0x7_confidential_proof_get_fiat_shamir_transfer_sigma_dst) +- [Function `get_fiat_shamir_normalization_sigma_dst`](#0x7_confidential_proof_get_fiat_shamir_normalization_sigma_dst) +- [Function `get_fiat_shamir_rotation_sigma_dst`](#0x7_confidential_proof_get_fiat_shamir_rotation_sigma_dst) +- [Function `get_bulletproofs_dst`](#0x7_confidential_proof_get_bulletproofs_dst) +- [Function `get_bulletproofs_num_bits`](#0x7_confidential_proof_get_bulletproofs_num_bits) +- [Function `fiat_shamir_withdrawal_sigma_proof_challenge`](#0x7_confidential_proof_fiat_shamir_withdrawal_sigma_proof_challenge) +- [Function `fiat_shamir_transfer_sigma_proof_challenge`](#0x7_confidential_proof_fiat_shamir_transfer_sigma_proof_challenge) +- [Function `fiat_shamir_normalization_sigma_proof_challenge`](#0x7_confidential_proof_fiat_shamir_normalization_sigma_proof_challenge) +- [Function `fiat_shamir_rotation_sigma_proof_challenge`](#0x7_confidential_proof_fiat_shamir_rotation_sigma_proof_challenge) +- [Function `msm_withdrawal_gammas`](#0x7_confidential_proof_msm_withdrawal_gammas) +- [Function `msm_transfer_gammas`](#0x7_confidential_proof_msm_transfer_gammas) +- [Function `msm_normalization_gammas`](#0x7_confidential_proof_msm_normalization_gammas) +- [Function `msm_rotation_gammas`](#0x7_confidential_proof_msm_rotation_gammas) +- [Function `msm_gamma_1`](#0x7_confidential_proof_msm_gamma_1) +- [Function `msm_gamma_2`](#0x7_confidential_proof_msm_gamma_2) +- [Function `scalar_mul_3`](#0x7_confidential_proof_scalar_mul_3) +- [Function `scalar_linear_combination`](#0x7_confidential_proof_scalar_linear_combination) +- [Function `new_scalar_from_pow2`](#0x7_confidential_proof_new_scalar_from_pow2) + + +
use 0x1::error;
+use 0x1::option;
+use 0x1::ristretto255;
+use 0x1::ristretto255_bulletproofs;
+use 0x1::vector;
+use 0x7::confidential_balance;
+use 0x7::ristretto255_twisted_elgamal;
+
+ + + + + +## Struct `WithdrawalProof` + +Represents the proof structure for validating a withdrawal operation. + + +
struct WithdrawalProof has drop
+
+ + + +
+Fields + + +
+
+sigma_proof: confidential_proof::WithdrawalSigmaProof +
+
+ Sigma proof ensuring that the withdrawal operation maintains balance integrity. +
+
+zkrp_new_balance: ristretto255_bulletproofs::RangeProof +
+
+ Range proof ensuring that the resulting balance chunks are normalized (i.e., within the 16-bit limit). +
+
+ + +
+ + + +## Struct `TransferProof` + +Represents the proof structure for validating a transfer operation. + + +
struct TransferProof has drop
+
+ + + +
+Fields + + +
+
+sigma_proof: confidential_proof::TransferSigmaProof +
+
+ Sigma proof ensuring that the transfer operation maintains balance integrity and correctness. +
+
+zkrp_new_balance: ristretto255_bulletproofs::RangeProof +
+
+ Range proof ensuring that the resulting balance chunks for the sender are normalized (i.e., within the 16-bit limit). +
+
+zkrp_transfer_amount: ristretto255_bulletproofs::RangeProof +
+
+ Range proof ensuring that the transferred amount chunks are normalized (i.e., within the 16-bit limit). +
+
+ + +
+ + + +## Struct `NormalizationProof` + +Represents the proof structure for validating a normalization operation. + + +
struct NormalizationProof has drop
+
+ + + +
+Fields + + +
+
+sigma_proof: confidential_proof::NormalizationSigmaProof +
+
+ Sigma proof ensuring that the normalization operation maintains balance integrity. +
+
+zkrp_new_balance: ristretto255_bulletproofs::RangeProof +
+
+ Range proof ensuring that the resulting balance chunks are normalized (i.e., within the 16-bit limit). +
+
+ + +
+ + + +## Struct `RotationProof` + +Represents the proof structure for validating a key rotation operation. + + +
struct RotationProof has drop
+
+ + + +
+Fields + + +
+
+sigma_proof: confidential_proof::RotationSigmaProof +
+
+ Sigma proof ensuring that the key rotation operation preserves balance integrity. +
+
+zkrp_new_balance: ristretto255_bulletproofs::RangeProof +
+
+ Range proof ensuring that the resulting balance chunks after key rotation are normalized (i.e., within the 16-bit limit). +
+
+ + +
+ + + +## Struct `WithdrawalSigmaProofXs` + + + +
struct WithdrawalSigmaProofXs has drop
+
+ + + +
+Fields + + +
+
+x1: ristretto255::CompressedRistretto +
+
+ +
+
+x2: ristretto255::CompressedRistretto +
+
+ +
+
+x3s: vector<ristretto255::CompressedRistretto> +
+
+ +
+
+x4s: vector<ristretto255::CompressedRistretto> +
+
+ +
+
+ + +
+ + + +## Struct `WithdrawalSigmaProofAlphas` + + + +
struct WithdrawalSigmaProofAlphas has drop
+
+ + + +
+Fields + + +
+
+a1s: vector<ristretto255::Scalar> +
+
+ +
+
+a2: ristretto255::Scalar +
+
+ +
+
+a3: ristretto255::Scalar +
+
+ +
+
+a4s: vector<ristretto255::Scalar> +
+
+ +
+
+ + +
+ + + +## Struct `WithdrawalSigmaProofGammas` + + + +
struct WithdrawalSigmaProofGammas has drop
+
+ + + +
+Fields + + +
+
+g1: ristretto255::Scalar +
+
+ +
+
+g2: ristretto255::Scalar +
+
+ +
+
+g3s: vector<ristretto255::Scalar> +
+
+ +
+
+g4s: vector<ristretto255::Scalar> +
+
+ +
+
+ + +
+ + + +## Struct `WithdrawalSigmaProof` + + + +
struct WithdrawalSigmaProof has drop
+
+ + + +
+Fields + + +
+
+alphas: confidential_proof::WithdrawalSigmaProofAlphas +
+
+ +
+
+xs: confidential_proof::WithdrawalSigmaProofXs +
+
+ +
+
+ + +
+ + + +## Struct `TransferSigmaProofXs` + + + +
struct TransferSigmaProofXs has drop
+
+ + + +
+Fields + + +
+
+x1: ristretto255::CompressedRistretto +
+
+ +
+
+x2s: vector<ristretto255::CompressedRistretto> +
+
+ +
+
+x3s: vector<ristretto255::CompressedRistretto> +
+
+ +
+
+x4s: vector<ristretto255::CompressedRistretto> +
+
+ +
+
+x5: ristretto255::CompressedRistretto +
+
+ +
+
+x6s: vector<ristretto255::CompressedRistretto> +
+
+ +
+
+x7s: vector<vector<ristretto255::CompressedRistretto>> +
+
+ +
+
+x8s: vector<ristretto255::CompressedRistretto> +
+
+ +
+
+ + +
+ + + +## Struct `TransferSigmaProofAlphas` + + + +
struct TransferSigmaProofAlphas has drop
+
+ + + +
+Fields + + +
+
+a1s: vector<ristretto255::Scalar> +
+
+ +
+
+a2: ristretto255::Scalar +
+
+ +
+
+a3s: vector<ristretto255::Scalar> +
+
+ +
+
+a4s: vector<ristretto255::Scalar> +
+
+ +
+
+a5: ristretto255::Scalar +
+
+ +
+
+a6s: vector<ristretto255::Scalar> +
+
+ +
+
+ + +
+ + + +## Struct `TransferSigmaProofGammas` + + + +
struct TransferSigmaProofGammas has drop
+
+ + + +
+Fields + + +
+
+g1: ristretto255::Scalar +
+
+ +
+
+g2s: vector<ristretto255::Scalar> +
+
+ +
+
+g3s: vector<ristretto255::Scalar> +
+
+ +
+
+g4s: vector<ristretto255::Scalar> +
+
+ +
+
+g5: ristretto255::Scalar +
+
+ +
+
+g6s: vector<ristretto255::Scalar> +
+
+ +
+
+g7s: vector<vector<ristretto255::Scalar>> +
+
+ +
+
+g8s: vector<ristretto255::Scalar> +
+
+ +
+
+ + +
+ + + +## Struct `TransferSigmaProof` + + + +
struct TransferSigmaProof has drop
+
+ + + +
+Fields + + +
+
+alphas: confidential_proof::TransferSigmaProofAlphas +
+
+ +
+
+xs: confidential_proof::TransferSigmaProofXs +
+
+ +
+
+ + +
+ + + +## Struct `NormalizationSigmaProofXs` + + + +
struct NormalizationSigmaProofXs has drop
+
+ + + +
+Fields + + +
+
+x1: ristretto255::CompressedRistretto +
+
+ +
+
+x2: ristretto255::CompressedRistretto +
+
+ +
+
+x3s: vector<ristretto255::CompressedRistretto> +
+
+ +
+
+x4s: vector<ristretto255::CompressedRistretto> +
+
+ +
+
+ + +
+ + + +## Struct `NormalizationSigmaProofAlphas` + + + +
struct NormalizationSigmaProofAlphas has drop
+
+ + + +
+Fields + + +
+
+a1s: vector<ristretto255::Scalar> +
+
+ +
+
+a2: ristretto255::Scalar +
+
+ +
+
+a3: ristretto255::Scalar +
+
+ +
+
+a4s: vector<ristretto255::Scalar> +
+
+ +
+
+ + +
+ + + +## Struct `NormalizationSigmaProofGammas` + + + +
struct NormalizationSigmaProofGammas has drop
+
+ + + +
+Fields + + +
+
+g1: ristretto255::Scalar +
+
+ +
+
+g2: ristretto255::Scalar +
+
+ +
+
+g3s: vector<ristretto255::Scalar> +
+
+ +
+
+g4s: vector<ristretto255::Scalar> +
+
+ +
+
+ + +
+ + + +## Struct `NormalizationSigmaProof` + + + +
struct NormalizationSigmaProof has drop
+
+ + + +
+Fields + + +
+
+alphas: confidential_proof::NormalizationSigmaProofAlphas +
+
+ +
+
+xs: confidential_proof::NormalizationSigmaProofXs +
+
+ +
+
+ + +
+ + + +## Struct `RotationSigmaProofXs` + + + +
struct RotationSigmaProofXs has drop
+
+ + + +
+Fields + + +
+
+x1: ristretto255::CompressedRistretto +
+
+ +
+
+x2: ristretto255::CompressedRistretto +
+
+ +
+
+x3: ristretto255::CompressedRistretto +
+
+ +
+
+x4s: vector<ristretto255::CompressedRistretto> +
+
+ +
+
+x5s: vector<ristretto255::CompressedRistretto> +
+
+ +
+
+ + +
+ + + +## Struct `RotationSigmaProofAlphas` + + + +
struct RotationSigmaProofAlphas has drop
+
+ + + +
+Fields + + +
+
+a1s: vector<ristretto255::Scalar> +
+
+ +
+
+a2: ristretto255::Scalar +
+
+ +
+
+a3: ristretto255::Scalar +
+
+ +
+
+a4: ristretto255::Scalar +
+
+ +
+
+a5s: vector<ristretto255::Scalar> +
+
+ +
+
+ + +
+ + + +## Struct `RotationSigmaProofGammas` + + + +
struct RotationSigmaProofGammas has drop
+
+ + + +
+Fields + + +
+
+g1: ristretto255::Scalar +
+
+ +
+
+g2: ristretto255::Scalar +
+
+ +
+
+g3: ristretto255::Scalar +
+
+ +
+
+g4s: vector<ristretto255::Scalar> +
+
+ +
+
+g5s: vector<ristretto255::Scalar> +
+
+ +
+
+ + +
+ + + +## Struct `RotationSigmaProof` + + + +
struct RotationSigmaProof has drop
+
+ + + +
+Fields + + +
+
+alphas: confidential_proof::RotationSigmaProofAlphas +
+
+ +
+
+xs: confidential_proof::RotationSigmaProofXs +
+
+ +
+
+ + +
+ + + +## Constants + + + + + + +
const BULLETPROOFS_DST: vector<u8> = [65, 112, 116, 111, 115, 67, 111, 110, 102, 105, 100, 101, 110, 116, 105, 97, 108, 65, 115, 115, 101, 116, 47, 66, 117, 108, 108, 101, 116, 112, 114, 111, 111, 102, 82, 97, 110, 103, 101, 80, 114, 111, 111, 102];
+
+ + + + + + + +
const BULLETPROOFS_NUM_BITS: u64 = 16;
+
+ + + + + + + +
const ERANGE_PROOF_VERIFICATION_FAILED: u64 = 2;
+
+ + + + + + + +
const ESIGMA_PROTOCOL_VERIFY_FAILED: u64 = 1;
+
+ + + + + + + +
const FIAT_SHAMIR_NORMALIZATION_SIGMA_DST: vector<u8> = [65, 112, 116, 111, 115, 67, 111, 110, 102, 105, 100, 101, 110, 116, 105, 97, 108, 65, 115, 115, 101, 116, 47, 78, 111, 114, 109, 97, 108, 105, 122, 97, 116, 105, 111, 110, 80, 114, 111, 111, 102, 70, 105, 97, 116, 83, 104, 97, 109, 105, 114];
+
+ + + + + + + +
const FIAT_SHAMIR_ROTATION_SIGMA_DST: vector<u8> = [65, 112, 116, 111, 115, 67, 111, 110, 102, 105, 100, 101, 110, 116, 105, 97, 108, 65, 115, 115, 101, 116, 47, 82, 111, 116, 97, 116, 105, 111, 110, 80, 114, 111, 111, 102, 70, 105, 97, 116, 83, 104, 97, 109, 105, 114];
+
+ + + + + + + +
const FIAT_SHAMIR_TRANSFER_SIGMA_DST: vector<u8> = [65, 112, 116, 111, 115, 67, 111, 110, 102, 105, 100, 101, 110, 116, 105, 97, 108, 65, 115, 115, 101, 116, 47, 84, 114, 97, 110, 115, 102, 101, 114, 80, 114, 111, 111, 102, 70, 105, 97, 116, 83, 104, 97, 109, 105, 114];
+
+ + + + + + + +
const FIAT_SHAMIR_WITHDRAWAL_SIGMA_DST: vector<u8> = [65, 112, 116, 111, 115, 67, 111, 110, 102, 105, 100, 101, 110, 116, 105, 97, 108, 65, 115, 115, 101, 116, 47, 87, 105, 116, 104, 100, 114, 97, 119, 97, 108, 80, 114, 111, 111, 102, 70, 105, 97, 116, 83, 104, 97, 109, 105, 114];
+
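For readability: the byte-array constants above are ASCII domain-separation tags. BULLETPROOFS_DST decodes to "AptosConfidentialAsset/BulletproofRangeProof", while FIAT_SHAMIR_NORMALIZATION_SIGMA_DST, FIAT_SHAMIR_ROTATION_SIGMA_DST, FIAT_SHAMIR_TRANSFER_SIGMA_DST, and FIAT_SHAMIR_WITHDRAWAL_SIGMA_DST decode to "AptosConfidentialAsset/NormalizationProofFiatShamir", "AptosConfidentialAsset/RotationProofFiatShamir", "AptosConfidentialAsset/TransferProofFiatShamir", and "AptosConfidentialAsset/WithdrawalProofFiatShamir".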
+ + + + + +## Function `verify_withdrawal_proof` + +Verifies the validity of the withdraw operation. + +This function ensures that the provided proof (WithdrawalProof) meets the following conditions: +1. The current balance (current_balance) and new balance (new_balance) encrypt the corresponding values +under the same encryption key (ek) before and after the withdrawal of the specified amount (amount), respectively. +2. The relationship new_balance = current_balance - amount holds, verifying that the withdrawal amount is deducted correctly. +3. The new balance (new_balance) is normalized, with each chunk adhering to the range [0, 2^16). + +If all conditions are satisfied, the proof validates the withdrawal; otherwise, the function causes an error. + + +
public fun verify_withdrawal_proof(ek: &ristretto255_twisted_elgamal::CompressedPubkey, amount: u64, current_balance: &confidential_balance::ConfidentialBalance, new_balance: &confidential_balance::ConfidentialBalance, proof: &confidential_proof::WithdrawalProof)
+
+ + + +
+Implementation + + +
public fun verify_withdrawal_proof(
+    ek: &twisted_elgamal::CompressedPubkey,
+    amount: u64,
+    current_balance: &confidential_balance::ConfidentialBalance,
+    new_balance: &confidential_balance::ConfidentialBalance,
+    proof: &WithdrawalProof)
+{
+    verify_withdrawal_sigma_proof(ek, amount, current_balance, new_balance, &proof.sigma_proof);
+    verify_new_balance_range_proof(new_balance, &proof.zkrp_new_balance);
+}
+
+ + + +
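A hedged end-to-end sketch (the helper module and function are hypothetical, and the byte inputs are assumed to come from an off-chain prover) of how a caller would typically wire the deserializers documented below into this verifier:

```move
module 0x7::confidential_proof_withdrawal_example {
    use std::option;
    use 0x7::confidential_balance;
    use 0x7::confidential_proof;
    use 0x7::ristretto255_twisted_elgamal as twisted_elgamal;

    /// Hypothetical helper: deserialize client-provided bytes and verify the withdrawal.
    /// Aborts inside `verify_withdrawal_proof` if either the sigma proof or the range proof is invalid,
    /// and inside `destroy_some` if any of the byte inputs fail to deserialize.
    public fun check_withdrawal(
        ek: &twisted_elgamal::CompressedPubkey,
        amount: u64,
        current_balance_bytes: vector<u8>,
        new_balance_bytes: vector<u8>,
        sigma_proof_bytes: vector<u8>,
        zkrp_new_balance_bytes: vector<u8>
    ) {
        let current_balance = option::destroy_some(
            confidential_balance::new_actual_balance_from_bytes(current_balance_bytes)
        );
        let new_balance = option::destroy_some(
            confidential_balance::new_actual_balance_from_bytes(new_balance_bytes)
        );
        let proof = option::destroy_some(
            confidential_proof::deserialize_withdrawal_proof(sigma_proof_bytes, zkrp_new_balance_bytes)
        );
        confidential_proof::verify_withdrawal_proof(ek, amount, &current_balance, &new_balance, &proof);
    }
}
```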
+ + + +## Function `verify_transfer_proof` + +Verifies the validity of the confidential_transfer operation. + +This function ensures that the provided proof (TransferProof) meets the following conditions: +1. The transferred amount (recipient_amount and sender_amount) and the auditors' amounts +(auditor_amounts), if provided, encrypt the transfer value using the recipient's, sender's, +and auditors' encryption keys, respectively. +2. The sender's current balance (current_balance) and new balance (new_balance) encrypt the corresponding values +under the sender's encryption key (sender_ek) before and after the transfer, respectively. +3. The relationship new_balance = current_balance - transfer_amount is maintained, ensuring balance integrity. +4. The transferred value (recipient_amount) is properly normalized, with each chunk adhering to the range [0, 2^16). +5. The sender's new balance is normalized, with each chunk in new_balance also adhering to the range [0, 2^16). + +If all conditions are satisfied, the proof validates the transfer; otherwise, the function causes an error. + + +
public fun verify_transfer_proof(sender_ek: &ristretto255_twisted_elgamal::CompressedPubkey, recipient_ek: &ristretto255_twisted_elgamal::CompressedPubkey, current_balance: &confidential_balance::ConfidentialBalance, new_balance: &confidential_balance::ConfidentialBalance, sender_amount: &confidential_balance::ConfidentialBalance, recipient_amount: &confidential_balance::ConfidentialBalance, auditor_eks: &vector<ristretto255_twisted_elgamal::CompressedPubkey>, auditor_amounts: &vector<confidential_balance::ConfidentialBalance>, proof: &confidential_proof::TransferProof)
+
+ + + +
+Implementation + + +
public fun verify_transfer_proof(
+    sender_ek: &twisted_elgamal::CompressedPubkey,
+    recipient_ek: &twisted_elgamal::CompressedPubkey,
+    current_balance: &confidential_balance::ConfidentialBalance,
+    new_balance: &confidential_balance::ConfidentialBalance,
+    sender_amount: &confidential_balance::ConfidentialBalance,
+    recipient_amount: &confidential_balance::ConfidentialBalance,
+    auditor_eks: &vector<twisted_elgamal::CompressedPubkey>,
+    auditor_amounts: &vector<confidential_balance::ConfidentialBalance>,
+    proof: &TransferProof)
+{
+    verify_transfer_sigma_proof(
+        sender_ek,
+        recipient_ek,
+        current_balance,
+        new_balance,
+        sender_amount,
+        recipient_amount,
+        auditor_eks,
+        auditor_amounts,
+        &proof.sigma_proof
+    );
+    verify_new_balance_range_proof(new_balance, &proof.zkrp_new_balance);
+    verify_transfer_amount_range_proof(recipient_amount, &proof.zkrp_transfer_amount);
+}
+
+ + + +
+ + + +## Function `verify_normalization_proof` + +Verifies the validity of the normalize operation. + +This function ensures that the provided proof (NormalizationProof) meets the following conditions: +1. The current balance (current_balance) and new balance (new_balance) encrypt the same value +under the same provided encryption key (ek), verifying that the normalization process preserves the balance value. +2. The new balance (new_balance) is properly normalized, with each chunk adhering to the range [0, 2^16), +as verified through the range proof in the normalization process. + +If all conditions are satisfied, the proof validates the normalization; otherwise, the function causes an error. + + +
public fun verify_normalization_proof(ek: &ristretto255_twisted_elgamal::CompressedPubkey, current_balance: &confidential_balance::ConfidentialBalance, new_balance: &confidential_balance::ConfidentialBalance, proof: &confidential_proof::NormalizationProof)
+
+ + + +
+Implementation + + +
public fun verify_normalization_proof(
+    ek: &twisted_elgamal::CompressedPubkey,
+    current_balance: &confidential_balance::ConfidentialBalance,
+    new_balance: &confidential_balance::ConfidentialBalance,
+    proof: &NormalizationProof)
+{
+    verify_normalization_sigma_proof(ek, current_balance, new_balance, &proof.sigma_proof);
+    verify_new_balance_range_proof(new_balance, &proof.zkrp_new_balance);
+}
+
+ + + +
+ + + +## Function `verify_rotation_proof` + +Verifies the validity of the rotate_encryption_key operation. + +This function ensures that the provided proof (RotationProof) meets the following conditions: +1. The current balance (current_balance) and new balance (new_balance) encrypt the same value under the +current encryption key (current_ek) and the new encryption key (new_ek), respectively, verifying +that the key rotation preserves the balance value. +2. The new balance (new_balance) is properly normalized, with each chunk adhering to the range [0, 2^16), +ensuring balance integrity after the key rotation. + +If all conditions are satisfied, the proof validates the key rotation; otherwise, the function causes an error. + + +
public fun verify_rotation_proof(current_ek: &ristretto255_twisted_elgamal::CompressedPubkey, new_ek: &ristretto255_twisted_elgamal::CompressedPubkey, current_balance: &confidential_balance::ConfidentialBalance, new_balance: &confidential_balance::ConfidentialBalance, proof: &confidential_proof::RotationProof)
+
+ + + +
+Implementation + + +
public fun verify_rotation_proof(
+    current_ek: &twisted_elgamal::CompressedPubkey,
+    new_ek: &twisted_elgamal::CompressedPubkey,
+    current_balance: &confidential_balance::ConfidentialBalance,
+    new_balance: &confidential_balance::ConfidentialBalance,
+    proof: &RotationProof)
+{
+    verify_rotation_sigma_proof(current_ek, new_ek, current_balance, new_balance, &proof.sigma_proof);
+    verify_new_balance_range_proof(new_balance, &proof.zkrp_new_balance);
+}
+
+ + + +
+ + + +## Function `verify_withdrawal_sigma_proof` + +Verifies the validity of the WithdrawalSigmaProof. + + +
fun verify_withdrawal_sigma_proof(ek: &ristretto255_twisted_elgamal::CompressedPubkey, amount: u64, current_balance: &confidential_balance::ConfidentialBalance, new_balance: &confidential_balance::ConfidentialBalance, proof: &confidential_proof::WithdrawalSigmaProof)
+
+ + + +
+Implementation + + +
fun verify_withdrawal_sigma_proof(
+    ek: &twisted_elgamal::CompressedPubkey,
+    amount: u64,
+    current_balance: &confidential_balance::ConfidentialBalance,
+    new_balance: &confidential_balance::ConfidentialBalance,
+    proof: &WithdrawalSigmaProof)
+{
+    let amount_chunks = confidential_balance::split_into_chunks_u64(amount);
+    let amount = ristretto255::new_scalar_from_u64(amount);
+
+    let rho = fiat_shamir_withdrawal_sigma_proof_challenge(ek, &amount_chunks, current_balance, &proof.xs);
+
+    let gammas = msm_withdrawal_gammas(&rho);
+
+    let scalars_lhs = vector[gammas.g1, gammas.g2];
+    scalars_lhs.append(gammas.g3s);
+    scalars_lhs.append(gammas.g4s);
+
+    let points_lhs = vector[
+        ristretto255::point_decompress(&proof.xs.x1),
+        ristretto255::point_decompress(&proof.xs.x2)
+    ];
+    points_lhs.append(proof.xs.x3s.map_ref(|x| ristretto255::point_decompress(x)));
+    points_lhs.append(proof.xs.x4s.map_ref(|x| ristretto255::point_decompress(x)));
+
+    let scalar_g = scalar_linear_combination(
+        &proof.alphas.a1s,
+        &vector::range(0, 8).map(|i| new_scalar_from_pow2(i * 16))
+    );
+    ristretto255::scalar_mul_assign(&mut scalar_g, &gammas.g1);
+    ristretto255::scalar_add_assign(
+        &mut scalar_g,
+        &scalar_linear_combination(&gammas.g3s, &proof.alphas.a1s)
+    );
+    ristretto255::scalar_sub_assign(&mut scalar_g, &scalar_mul_3(&gammas.g1, &rho, &amount));
+
+    let scalar_h = ristretto255::scalar_mul(&gammas.g2, &proof.alphas.a3);
+    ristretto255::scalar_add_assign(
+        &mut scalar_h,
+        &scalar_linear_combination(&gammas.g3s, &proof.alphas.a4s)
+    );
+
+    let scalar_ek = ristretto255::scalar_mul(&gammas.g2, &rho);
+    ristretto255::scalar_add_assign(
+        &mut scalar_ek,
+        &scalar_linear_combination(&gammas.g4s, &proof.alphas.a4s)
+    );
+
+    let scalars_current_balance_d = vector::range(0, 8).map(|i| {
+        scalar_mul_3(&gammas.g1, &proof.alphas.a2, &new_scalar_from_pow2(i * 16))
+    });
+
+    let scalars_new_balance_d = vector::range(0, 8).map(|i| {
+        ristretto255::scalar_mul(&gammas.g4s[i], &rho)
+    });
+
+    let scalars_current_balance_c = vector::range(0, 8).map(|i| {
+        scalar_mul_3(&gammas.g1, &rho, &new_scalar_from_pow2(i * 16))
+    });
+
+    let scalars_new_balance_c = vector::range(0, 8).map(|i| {
+        ristretto255::scalar_mul(&gammas.g3s[i], &rho)
+    });
+
+    let scalars_rhs = vector[scalar_g, scalar_h, scalar_ek];
+    scalars_rhs.append(scalars_current_balance_d);
+    scalars_rhs.append(scalars_new_balance_d);
+    scalars_rhs.append(scalars_current_balance_c);
+    scalars_rhs.append(scalars_new_balance_c);
+
+    let points_rhs = vector[
+        ristretto255::basepoint(),
+        ristretto255::hash_to_point_base(),
+        twisted_elgamal::pubkey_to_point(ek)
+    ];
+    points_rhs.append(confidential_balance::balance_to_points_d(current_balance));
+    points_rhs.append(confidential_balance::balance_to_points_d(new_balance));
+    points_rhs.append(confidential_balance::balance_to_points_c(current_balance));
+    points_rhs.append(confidential_balance::balance_to_points_c(new_balance));
+
+    let lhs = ristretto255::multi_scalar_mul(&points_lhs, &scalars_lhs);
+    let rhs = ristretto255::multi_scalar_mul(&points_rhs, &scalars_rhs);
+
+    assert!(
+        ristretto255::point_equals(&lhs, &rhs),
+        error::invalid_argument(ESIGMA_PROTOCOL_VERIFY_FAILED)
+    );
+}
+
+ + + +
+ + + +## Function `verify_transfer_sigma_proof` + +Verifies the validity of the TransferSigmaProof. + + +
fun verify_transfer_sigma_proof(sender_ek: &ristretto255_twisted_elgamal::CompressedPubkey, recipient_ek: &ristretto255_twisted_elgamal::CompressedPubkey, current_balance: &confidential_balance::ConfidentialBalance, new_balance: &confidential_balance::ConfidentialBalance, sender_amount: &confidential_balance::ConfidentialBalance, recipient_amount: &confidential_balance::ConfidentialBalance, auditor_eks: &vector<ristretto255_twisted_elgamal::CompressedPubkey>, auditor_amounts: &vector<confidential_balance::ConfidentialBalance>, proof: &confidential_proof::TransferSigmaProof)
+
+ + + +
+Implementation + + +
fun verify_transfer_sigma_proof(
+    sender_ek: &twisted_elgamal::CompressedPubkey,
+    recipient_ek: &twisted_elgamal::CompressedPubkey,
+    current_balance: &confidential_balance::ConfidentialBalance,
+    new_balance: &confidential_balance::ConfidentialBalance,
+    sender_amount: &confidential_balance::ConfidentialBalance,
+    recipient_amount: &confidential_balance::ConfidentialBalance,
+    auditor_eks: &vector<twisted_elgamal::CompressedPubkey>,
+    auditor_amounts: &vector<confidential_balance::ConfidentialBalance>,
+    proof: &TransferSigmaProof)
+{
+    let rho = fiat_shamir_transfer_sigma_proof_challenge(
+        sender_ek,
+        recipient_ek,
+        current_balance,
+        new_balance,
+        sender_amount,
+        recipient_amount,
+        auditor_eks,
+        auditor_amounts,
+        &proof.xs
+    );
+
+    let gammas = msm_transfer_gammas(&rho, proof.xs.x7s.length());
+
+    let scalars_lhs = vector[gammas.g1];
+    scalars_lhs.append(gammas.g2s);
+    scalars_lhs.append(gammas.g3s);
+    scalars_lhs.append(gammas.g4s);
+    scalars_lhs.push_back(gammas.g5);
+    scalars_lhs.append(gammas.g6s);
+    gammas.g7s.for_each(|gamma| scalars_lhs.append(gamma));
+    scalars_lhs.append(gammas.g8s);
+
+    let points_lhs = vector[
+        ristretto255::point_decompress(&proof.xs.x1),
+    ];
+    points_lhs.append(proof.xs.x2s.map_ref(|x| ristretto255::point_decompress(x)));
+    points_lhs.append(proof.xs.x3s.map_ref(|x| ristretto255::point_decompress(x)));
+    points_lhs.append(proof.xs.x4s.map_ref(|x| ristretto255::point_decompress(x)));
+    points_lhs.push_back(ristretto255::point_decompress(&proof.xs.x5));
+    points_lhs.append(proof.xs.x6s.map_ref(|x| ristretto255::point_decompress(x)));
+    proof.xs.x7s.for_each_ref(|xs| {
+        points_lhs.append(xs.map_ref(|x| ristretto255::point_decompress(x)));
+    });
+    points_lhs.append(proof.xs.x8s.map_ref(|x| ristretto255::point_decompress(x)));
+
+    let scalar_g = scalar_linear_combination(
+        &proof.alphas.a1s,
+        &vector::range(0, 8).map(|i| new_scalar_from_pow2(i * 16))
+    );
+    ristretto255::scalar_mul_assign(&mut scalar_g, &gammas.g1);
+    vector::range(0, 4).for_each(|i| {
+        ristretto255::scalar_add_assign(
+            &mut scalar_g,
+            &ristretto255::scalar_mul(&gammas.g4s[i], &proof.alphas.a4s[i])
+        );
+    });
+    ristretto255::scalar_add_assign(
+        &mut scalar_g,
+        &scalar_linear_combination(&gammas.g6s, &proof.alphas.a1s)
+    );
+
+    let scalar_h = ristretto255::scalar_mul(&gammas.g5, &proof.alphas.a5);
+    vector::range(0, 8).for_each(|i| {
+        ristretto255::scalar_add_assign(
+            &mut scalar_h,
+            &scalar_mul_3(&gammas.g1, &proof.alphas.a6s[i], &new_scalar_from_pow2(i * 16))
+        );
+    });
+    vector::range(0, 4).for_each(|i| {
+        ristretto255::scalar_sub_assign(
+            &mut scalar_h,
+            &scalar_mul_3(&gammas.g1, &proof.alphas.a3s[i], &new_scalar_from_pow2(i * 16))
+        );
+    });
+    ristretto255::scalar_add_assign(
+        &mut scalar_h,
+        &scalar_linear_combination(&gammas.g4s, &proof.alphas.a3s)
+    );
+    ristretto255::scalar_add_assign(
+        &mut scalar_h,
+        &scalar_linear_combination(&gammas.g6s, &proof.alphas.a6s)
+    );
+
+    let scalar_sender_ek = scalar_linear_combination(&gammas.g2s, &proof.alphas.a6s);
+    ristretto255::scalar_add_assign(&mut scalar_sender_ek, &ristretto255::scalar_mul(&gammas.g5, &rho));
+    ristretto255::scalar_add_assign(
+        &mut scalar_sender_ek,
+        &scalar_linear_combination(&gammas.g8s, &proof.alphas.a3s)
+    );
+
+    let scalar_recipient_ek = ristretto255::scalar_zero();
+    vector::range(0, 4).for_each(|i| {
+        ristretto255::scalar_add_assign(
+            &mut scalar_recipient_ek,
+            &ristretto255::scalar_mul(&gammas.g3s[i], &proof.alphas.a3s[i])
+        );
+    });
+
+    let scalar_ek_auditors = gammas.g7s.map_ref(|gamma: &vector<Scalar>| {
+        let scalar_auditor_ek = ristretto255::scalar_zero();
+        vector::range(0, 4).for_each(|i| {
+            ristretto255::scalar_add_assign(
+                &mut scalar_auditor_ek,
+                &ristretto255::scalar_mul(&gamma[i], &proof.alphas.a3s[i])
+            );
+        });
+        scalar_auditor_ek
+    });
+
+    let scalars_new_balance_d = vector::range(0, 8).map(|i| {
+        let scalar = ristretto255::scalar_mul(&gammas.g2s[i], &rho);
+        ristretto255::scalar_sub_assign(
+            &mut scalar,
+            &scalar_mul_3(&gammas.g1, &proof.alphas.a2, &new_scalar_from_pow2(i * 16))
+        );
+        scalar
+    });
+
+    let scalars_recipient_amount_d = vector::range(0, 4).map(|i| {
+        ristretto255::scalar_mul(&gammas.g3s[i], &rho)
+    });
+
+    let scalars_current_balance_d = vector::range(0, 8).map(|i| {
+        scalar_mul_3(&gammas.g1, &proof.alphas.a2, &new_scalar_from_pow2(i * 16))
+    });
+
+    let scalars_auditor_amount_d = gammas.g7s.map_ref(|gamma| {
+        gamma.map_ref(|gamma| ristretto255::scalar_mul(gamma, &rho))
+    });
+
+    let scalars_sender_amount_d = vector::range(0, 4).map(|i| {
+        ristretto255::scalar_mul(&gammas.g8s[i], &rho)
+    });
+
+    let scalars_current_balance_c = vector::range(0, 8).map(|i| {
+        scalar_mul_3(&gammas.g1, &rho, &new_scalar_from_pow2(i * 16))
+    });
+
+    let scalars_transfer_amount_c = vector::range(0, 4).map(|i| {
+        let scalar = ristretto255::scalar_mul(&gammas.g4s[i], &rho);
+        ristretto255::scalar_sub_assign(
+            &mut scalar,
+            &scalar_mul_3(&gammas.g1, &rho, &new_scalar_from_pow2(i * 16))
+        );
+        scalar
+    });
+
+    let scalars_new_balance_c = vector::range(0, 8).map(|i| {
+        ristretto255::scalar_mul(&gammas.g6s[i], &rho)
+    });
+
+    let scalars_rhs = vector[scalar_g, scalar_h, scalar_sender_ek, scalar_recipient_ek];
+    scalars_rhs.append(scalar_ek_auditors);
+    scalars_rhs.append(scalars_new_balance_d);
+    scalars_rhs.append(scalars_recipient_amount_d);
+    scalars_rhs.append(scalars_current_balance_d);
+    scalars_auditor_amount_d.for_each(|scalars| scalars_rhs.append(scalars));
+    scalars_rhs.append(scalars_sender_amount_d);
+    scalars_rhs.append(scalars_current_balance_c);
+    scalars_rhs.append(scalars_transfer_amount_c);
+    scalars_rhs.append(scalars_new_balance_c);
+
+    let points_rhs = vector[
+        ristretto255::basepoint(),
+        ristretto255::hash_to_point_base(),
+        twisted_elgamal::pubkey_to_point(sender_ek),
+        twisted_elgamal::pubkey_to_point(recipient_ek)
+    ];
+    points_rhs.append(auditor_eks.map_ref(|ek| twisted_elgamal::pubkey_to_point(ek)));
+    points_rhs.append(confidential_balance::balance_to_points_d(new_balance));
+    points_rhs.append(confidential_balance::balance_to_points_d(recipient_amount));
+    points_rhs.append(confidential_balance::balance_to_points_d(current_balance));
+    auditor_amounts.for_each_ref(|balance| {
+        points_rhs.append(confidential_balance::balance_to_points_d(balance));
+    });
+    points_rhs.append(confidential_balance::balance_to_points_d(sender_amount));
+    points_rhs.append(confidential_balance::balance_to_points_c(current_balance));
+    points_rhs.append(confidential_balance::balance_to_points_c(recipient_amount));
+    points_rhs.append(confidential_balance::balance_to_points_c(new_balance));
+
+    let lhs = ristretto255::multi_scalar_mul(&points_lhs, &scalars_lhs);
+    let rhs = ristretto255::multi_scalar_mul(&points_rhs, &scalars_rhs);
+
+    assert!(
+        ristretto255::point_equals(&lhs, &rhs),
+        error::invalid_argument(ESIGMA_PROTOCOL_VERIFY_FAILED)
+    );
+}
+
+ + + +
+ + + +## Function `verify_normalization_sigma_proof` + +Verifies the validity of the NormalizationSigmaProof. + + +
fun verify_normalization_sigma_proof(ek: &ristretto255_twisted_elgamal::CompressedPubkey, current_balance: &confidential_balance::ConfidentialBalance, new_balance: &confidential_balance::ConfidentialBalance, proof: &confidential_proof::NormalizationSigmaProof)
+
+ + + +
+Implementation + + +
fun verify_normalization_sigma_proof(
+    ek: &twisted_elgamal::CompressedPubkey,
+    current_balance: &confidential_balance::ConfidentialBalance,
+    new_balance: &confidential_balance::ConfidentialBalance,
+    proof: &NormalizationSigmaProof)
+{
+    let rho = fiat_shamir_normalization_sigma_proof_challenge(ek, current_balance, new_balance, &proof.xs);
+    let gammas = msm_normalization_gammas(&rho);
+
+    let scalars_lhs = vector[gammas.g1, gammas.g2];
+    scalars_lhs.append(gammas.g3s);
+    scalars_lhs.append(gammas.g4s);
+
+    let points_lhs = vector[
+        ristretto255::point_decompress(&proof.xs.x1),
+        ristretto255::point_decompress(&proof.xs.x2)
+    ];
+    points_lhs.append(proof.xs.x3s.map_ref(|x| ristretto255::point_decompress(x)));
+    points_lhs.append(proof.xs.x4s.map_ref(|x| ristretto255::point_decompress(x)));
+
+    let scalar_g = scalar_linear_combination(
+        &proof.alphas.a1s,
+        &vector::range(0, 8).map(|i| new_scalar_from_pow2(i * 16))
+    );
+    ristretto255::scalar_mul_assign(&mut scalar_g, &gammas.g1);
+    ristretto255::scalar_add_assign(
+        &mut scalar_g,
+        &scalar_linear_combination(&gammas.g3s, &proof.alphas.a1s)
+    );
+
+    let scalar_h = ristretto255::scalar_mul(&gammas.g2, &proof.alphas.a3);
+    ristretto255::scalar_add_assign(
+        &mut scalar_h,
+        &scalar_linear_combination(&gammas.g3s, &proof.alphas.a4s)
+    );
+
+    let scalar_ek = ristretto255::scalar_mul(&gammas.g2, &rho);
+    ristretto255::scalar_add_assign(
+        &mut scalar_ek,
+        &scalar_linear_combination(&gammas.g4s, &proof.alphas.a4s)
+    );
+
+    let scalars_current_balance_d = vector::range(0, 8).map(|i| {
+        scalar_mul_3(&gammas.g1, &proof.alphas.a2, &new_scalar_from_pow2(i * 16))
+    });
+
+    let scalars_new_balance_d = vector::range(0, 8).map(|i| {
+        ristretto255::scalar_mul(&gammas.g4s[i], &rho)
+    });
+
+    let scalars_current_balance_c = vector::range(0, 8).map(|i| {
+        scalar_mul_3(&gammas.g1, &rho, &new_scalar_from_pow2(i * 16))
+    });
+
+    let scalars_new_balance_c = vector::range(0, 8).map(|i| {
+        ristretto255::scalar_mul(&gammas.g3s[i], &rho)
+    });
+
+    let scalars_rhs = vector[scalar_g, scalar_h, scalar_ek];
+    scalars_rhs.append(scalars_current_balance_d);
+    scalars_rhs.append(scalars_new_balance_d);
+    scalars_rhs.append(scalars_current_balance_c);
+    scalars_rhs.append(scalars_new_balance_c);
+
+    let points_rhs = vector[
+        ristretto255::basepoint(),
+        ristretto255::hash_to_point_base(),
+        twisted_elgamal::pubkey_to_point(ek)
+    ];
+    points_rhs.append(confidential_balance::balance_to_points_d(current_balance));
+    points_rhs.append(confidential_balance::balance_to_points_d(new_balance));
+    points_rhs.append(confidential_balance::balance_to_points_c(current_balance));
+    points_rhs.append(confidential_balance::balance_to_points_c(new_balance));
+
+    let lhs = ristretto255::multi_scalar_mul(&points_lhs, &scalars_lhs);
+    let rhs = ristretto255::multi_scalar_mul(&points_rhs, &scalars_rhs);
+
+    assert!(
+        ristretto255::point_equals(&lhs, &rhs),
+        error::invalid_argument(ESIGMA_PROTOCOL_VERIFY_FAILED)
+    );
+}
+
+ + + +
+ + + +## Function `verify_rotation_sigma_proof` + +Verifies the validity of the RotationSigmaProof. + + +
fun verify_rotation_sigma_proof(current_ek: &ristretto255_twisted_elgamal::CompressedPubkey, new_ek: &ristretto255_twisted_elgamal::CompressedPubkey, current_balance: &confidential_balance::ConfidentialBalance, new_balance: &confidential_balance::ConfidentialBalance, proof: &confidential_proof::RotationSigmaProof)
+
+ + + +
+Implementation + + +
fun verify_rotation_sigma_proof(
+    current_ek: &twisted_elgamal::CompressedPubkey,
+    new_ek: &twisted_elgamal::CompressedPubkey,
+    current_balance: &confidential_balance::ConfidentialBalance,
+    new_balance: &confidential_balance::ConfidentialBalance,
+    proof: &RotationSigmaProof)
+{
+    let rho = fiat_shamir_rotation_sigma_proof_challenge(
+        current_ek,
+        new_ek,
+        current_balance,
+        new_balance,
+        &proof.xs
+    );
+    let gammas = msm_rotation_gammas(&rho);
+
+    let scalars_lhs = vector[gammas.g1, gammas.g2, gammas.g3];
+    scalars_lhs.append(gammas.g4s);
+    scalars_lhs.append(gammas.g5s);
+
+    let points_lhs = vector[
+        ristretto255::point_decompress(&proof.xs.x1),
+        ristretto255::point_decompress(&proof.xs.x2),
+        ristretto255::point_decompress(&proof.xs.x3)
+    ];
+    points_lhs.append(proof.xs.x4s.map_ref(|x| ristretto255::point_decompress(x)));
+    points_lhs.append(proof.xs.x5s.map_ref(|x| ristretto255::point_decompress(x)));
+
+    let scalar_g = scalar_linear_combination(
+        &proof.alphas.a1s,
+        &vector::range(0, 8).map(|i| new_scalar_from_pow2(i * 16))
+    );
+    ristretto255::scalar_mul_assign(&mut scalar_g, &gammas.g1);
+    ristretto255::scalar_add_assign(
+        &mut scalar_g,
+        &scalar_linear_combination(&gammas.g4s, &proof.alphas.a1s)
+    );
+
+    let scalar_h = ristretto255::scalar_mul(&gammas.g2, &proof.alphas.a3);
+    ristretto255::scalar_add_assign(&mut scalar_h, &ristretto255::scalar_mul(&gammas.g3, &proof.alphas.a4));
+    ristretto255::scalar_add_assign(
+        &mut scalar_h,
+        &scalar_linear_combination(&gammas.g4s, &proof.alphas.a5s)
+    );
+
+    let scalar_ek_cur = ristretto255::scalar_mul(&gammas.g2, &rho);
+
+    let scalar_ek_new = ristretto255::scalar_mul(&gammas.g3, &rho);
+    ristretto255::scalar_add_assign(
+        &mut scalar_ek_new,
+        &scalar_linear_combination(&gammas.g5s, &proof.alphas.a5s)
+    );
+
+    let scalars_current_balance_d = vector::range(0, 8).map(|i| {
+        scalar_mul_3(&gammas.g1, &proof.alphas.a2, &new_scalar_from_pow2(i * 16))
+    });
+
+    let scalars_new_balance_d = vector::range(0, 8).map(|i| {
+        ristretto255::scalar_mul(&gammas.g5s[i], &rho)
+    });
+
+    let scalars_current_balance_c = vector::range(0, 8).map(|i| {
+        scalar_mul_3(&gammas.g1, &rho, &new_scalar_from_pow2(i * 16))
+    });
+
+    let scalars_new_balance_c = vector::range(0, 8).map(|i| {
+        ristretto255::scalar_mul(&gammas.g4s[i], &rho)
+    });
+
+    let scalars_rhs = vector[scalar_g, scalar_h, scalar_ek_cur, scalar_ek_new];
+    scalars_rhs.append(scalars_current_balance_d);
+    scalars_rhs.append(scalars_new_balance_d);
+    scalars_rhs.append(scalars_current_balance_c);
+    scalars_rhs.append(scalars_new_balance_c);
+
+    let points_rhs = vector[
+        ristretto255::basepoint(),
+        ristretto255::hash_to_point_base(),
+        twisted_elgamal::pubkey_to_point(current_ek),
+        twisted_elgamal::pubkey_to_point(new_ek)
+    ];
+    points_rhs.append(confidential_balance::balance_to_points_d(current_balance));
+    points_rhs.append(confidential_balance::balance_to_points_d(new_balance));
+    points_rhs.append(confidential_balance::balance_to_points_c(current_balance));
+    points_rhs.append(confidential_balance::balance_to_points_c(new_balance));
+
+    let lhs = ristretto255::multi_scalar_mul(&points_lhs, &scalars_lhs);
+    let rhs = ristretto255::multi_scalar_mul(&points_rhs, &scalars_rhs);
+
+    assert!(
+        ristretto255::point_equals(&lhs, &rhs),
+        error::invalid_argument(ESIGMA_PROTOCOL_VERIFY_FAILED)
+    );
+}
+
+ + + +
+ + + +## Function `verify_new_balance_range_proof` + +Verifies the validity of the NewBalanceRangeProof. + + +
fun verify_new_balance_range_proof(new_balance: &confidential_balance::ConfidentialBalance, zkrp_new_balance: &ristretto255_bulletproofs::RangeProof)
+
+ + + +
+Implementation + + +
fun verify_new_balance_range_proof(
+    new_balance: &confidential_balance::ConfidentialBalance,
+    zkrp_new_balance: &RangeProof)
+{
+    let balance_c = confidential_balance::balance_to_points_c(new_balance);
+
+    assert!(
+        bulletproofs::verify_batch_range_proof(
+            &balance_c,
+            &ristretto255::basepoint(),
+            &ristretto255::hash_to_point_base(),
+            zkrp_new_balance,
+            BULLETPROOFS_NUM_BITS,
+            BULLETPROOFS_DST
+        ),
+        error::out_of_range(ERANGE_PROOF_VERIFICATION_FAILED)
+    );
+}
+
+ + + +
+ + + +## Function `verify_transfer_amount_range_proof` + +Verifies the validity of the TransferBalanceRangeProof. + + +
fun verify_transfer_amount_range_proof(transfer_amount: &confidential_balance::ConfidentialBalance, zkrp_transfer_amount: &ristretto255_bulletproofs::RangeProof)
+
+ + + +
+Implementation + + +
fun verify_transfer_amount_range_proof(
+    transfer_amount: &confidential_balance::ConfidentialBalance,
+    zkrp_transfer_amount: &RangeProof)
+{
+    let balance_c = confidential_balance::balance_to_points_c(transfer_amount);
+
+    assert!(
+        bulletproofs::verify_batch_range_proof(
+            &balance_c,
+            &ristretto255::basepoint(),
+            &ristretto255::hash_to_point_base(),
+            zkrp_transfer_amount,
+            BULLETPROOFS_NUM_BITS,
+            BULLETPROOFS_DST
+        ),
+        error::out_of_range(ERANGE_PROOF_VERIFICATION_FAILED)
+    );
+}
+
+ + + +
+ + + +## Function `auditors_count_in_transfer_proof` + +Returns the number of auditors included in the provided TransferProof. +Used in the confidential_asset module to validate input parameters of the confidential_transfer function. + +
public(friend) fun auditors_count_in_transfer_proof(proof: &confidential_proof::TransferProof): u64
+
+ + + +
+Implementation + + +
public(friend) fun auditors_count_in_transfer_proof(proof: &TransferProof): u64 {
+    proof.sigma_proof.xs.x7s.length()
+}
+
+ + + +
+ + + +## Function `deserialize_withdrawal_proof` + +Deserializes the WithdrawalProof from the byte array. +Returns Some(WithdrawalProof) if the deserialization is successful; otherwise, returns None. + + +
public fun deserialize_withdrawal_proof(sigma_proof_bytes: vector<u8>, zkrp_new_balance_bytes: vector<u8>): option::Option<confidential_proof::WithdrawalProof>
+
+ + + +
+Implementation + + +
public fun deserialize_withdrawal_proof(
+    sigma_proof_bytes: vector<u8>,
+    zkrp_new_balance_bytes: vector<u8>): Option<WithdrawalProof>
+{
+    let sigma_proof = deserialize_withdrawal_sigma_proof(sigma_proof_bytes);
+    let zkrp_new_balance = bulletproofs::range_proof_from_bytes(zkrp_new_balance_bytes);
+
+    if (sigma_proof.is_none()) {
+        return option::none()
+    };
+
+    option::some(
+        WithdrawalProof {
+            sigma_proof: sigma_proof.extract(),
+            zkrp_new_balance,
+        }
+    )
+}
+
+ + + +
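+
+A minimal usage sketch (assumed to live inside this module, e.g. as a test-only helper; the wrapper name and the reuse of the abort code are illustrative). The deserializers return option::none() instead of aborting, so a caller can reject malformed bytes with its own error before running verification:
+
+fun example_parse_withdrawal_proof(
+    sigma_proof_bytes: vector<u8>,
+    zkrp_new_balance_bytes: vector<u8>): WithdrawalProof
+{
+    let proof = deserialize_withdrawal_proof(sigma_proof_bytes, zkrp_new_balance_bytes);
+    // Reject malformed input early rather than deep inside verification.
+    assert!(proof.is_some(), error::invalid_argument(ESIGMA_PROTOCOL_VERIFY_FAILED));
+    proof.extract()
+}
+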
+ + + +## Function `deserialize_transfer_proof` + +Deserializes the TransferProof from the byte array. +Returns Some(TransferProof) if the deserialization is successful; otherwise, returns None. + + +
public fun deserialize_transfer_proof(sigma_proof_bytes: vector<u8>, zkrp_new_balance_bytes: vector<u8>, zkrp_transfer_amount_bytes: vector<u8>): option::Option<confidential_proof::TransferProof>
+
+ + + +
+Implementation + + +
public fun deserialize_transfer_proof(
+    sigma_proof_bytes: vector<u8>,
+    zkrp_new_balance_bytes: vector<u8>,
+    zkrp_transfer_amount_bytes: vector<u8>): Option<TransferProof>
+{
+    let sigma_proof = deserialize_transfer_sigma_proof(sigma_proof_bytes);
+    let zkrp_new_balance = bulletproofs::range_proof_from_bytes(zkrp_new_balance_bytes);
+    let zkrp_transfer_amount = bulletproofs::range_proof_from_bytes(zkrp_transfer_amount_bytes);
+
+    if (sigma_proof.is_none()) {
+        return option::none()
+    };
+
+    option::some(
+        TransferProof {
+            sigma_proof: sigma_proof.extract(),
+            zkrp_new_balance,
+            zkrp_transfer_amount,
+        }
+    )
+}
+
+ + + +
+ + + +## Function `deserialize_normalization_proof` + +Deserializes the NormalizationProof from the byte array. +Returns Some(NormalizationProof) if the deserialization is successful; otherwise, returns None. + + +
public fun deserialize_normalization_proof(sigma_proof_bytes: vector<u8>, zkrp_new_balance_bytes: vector<u8>): option::Option<confidential_proof::NormalizationProof>
+
+ + + +
+Implementation + + +
public fun deserialize_normalization_proof(
+    sigma_proof_bytes: vector<u8>,
+    zkrp_new_balance_bytes: vector<u8>): Option<NormalizationProof>
+{
+    let sigma_proof = deserialize_normalization_sigma_proof(sigma_proof_bytes);
+    let zkrp_new_balance = bulletproofs::range_proof_from_bytes(zkrp_new_balance_bytes);
+
+    if (sigma_proof.is_none()) {
+        return option::none()
+    };
+
+    option::some(
+        NormalizationProof {
+            sigma_proof: sigma_proof.extract(),
+            zkrp_new_balance,
+        }
+    )
+}
+
+ + + +
+ + + +## Function `deserialize_rotation_proof` + +Deserializes the RotationProof from the byte array. +Returns Some(RotationProof) if the deserialization is successful; otherwise, returns None. + + +
public fun deserialize_rotation_proof(sigma_proof_bytes: vector<u8>, zkrp_new_balance_bytes: vector<u8>): option::Option<confidential_proof::RotationProof>
+
+ + + +
+Implementation + + +
public fun deserialize_rotation_proof(
+    sigma_proof_bytes: vector<u8>,
+    zkrp_new_balance_bytes: vector<u8>): Option<RotationProof>
+{
+    let sigma_proof = deserialize_rotation_sigma_proof(sigma_proof_bytes);
+    let zkrp_new_balance = bulletproofs::range_proof_from_bytes(zkrp_new_balance_bytes);
+
+    if (sigma_proof.is_none()) {
+        return option::none()
+    };
+
+    option::some(
+        RotationProof {
+            sigma_proof: sigma_proof.extract(),
+            zkrp_new_balance,
+        }
+    )
+}
+
+ + + +
+ + + +## Function `deserialize_withdrawal_sigma_proof` + +Deserializes the WithdrawalSigmaProof from the byte array. +Returns Some(WithdrawalSigmaProof) if the deserialization is successful; otherwise, returns None. + + +
fun deserialize_withdrawal_sigma_proof(proof_bytes: vector<u8>): option::Option<confidential_proof::WithdrawalSigmaProof>
+
+ + + +
+Implementation + + +
fun deserialize_withdrawal_sigma_proof(proof_bytes: vector<u8>): Option<WithdrawalSigmaProof> {
+    let alphas_count = 18;
+    let xs_count = 18;
+
+    if (proof_bytes.length() != 32 * xs_count + 32 * alphas_count) {
+        return option::none()
+    };
+
+    let alphas = vector::range(0, alphas_count).map(|i| {
+        ristretto255::new_scalar_from_bytes(proof_bytes.slice(i * 32, (i + 1) * 32))
+    });
+    let xs = vector::range(alphas_count, alphas_count + xs_count).map(|i| {
+        ristretto255::new_compressed_point_from_bytes(proof_bytes.slice(i * 32, (i + 1) * 32))
+    });
+
+    if (alphas.any(|alpha| alpha.is_none()) || xs.any(|x| x.is_none())) {
+        return option::none()
+    };
+
+    option::some(
+        WithdrawalSigmaProof {
+            alphas: WithdrawalSigmaProofAlphas {
+                a1s: alphas.slice(0, 8).map(|alpha| alpha.extract()),
+                a2: alphas[8].extract(),
+                a3: alphas[9].extract(),
+                a4s: alphas.slice(10, 18).map(|alpha| alpha.extract()),
+            },
+            xs: WithdrawalSigmaProofXs {
+                x1: xs[0].extract(),
+                x2: xs[1].extract(),
+                x3s: xs.slice(2, 10).map(|x| x.extract()),
+                x4s: xs.slice(10, 18).map(|x| x.extract()),
+            },
+        }
+    )
+}
+
+ + + +
+ + + +## Function `deserialize_transfer_sigma_proof` + +Deserializes the TransferSigmaProof from the byte array. +Returns Some(TransferSigmaProof) if the deserialization is successful; otherwise, returns None. + + +
fun deserialize_transfer_sigma_proof(proof_bytes: vector<u8>): option::Option<confidential_proof::TransferSigmaProof>
+
+ + + +
+Implementation + + +
fun deserialize_transfer_sigma_proof(proof_bytes: vector<u8>): Option<TransferSigmaProof> {
+    let alphas_count = 26;
+    let xs_count = 30;
+
+    if (proof_bytes.length() < 32 * xs_count + 32 * alphas_count) {
+        return option::none()
+    };
+
+    // A transfer proof may contain four additional Xs for each auditor.
+    let auditor_xs = proof_bytes.length() - (32 * xs_count + 32 * alphas_count);
+
+    if (auditor_xs % 128 != 0) {
+        return option::none()
+    };
+
+    xs_count += auditor_xs / 32;
+
+    let alphas = vector::range(0, alphas_count).map(|i| {
+        ristretto255::new_scalar_from_bytes(proof_bytes.slice(i * 32, (i + 1) * 32))
+    });
+    let xs = vector::range(alphas_count, alphas_count + xs_count).map(|i| {
+        ristretto255::new_compressed_point_from_bytes(proof_bytes.slice(i * 32, (i + 1) * 32))
+    });
+
+    if (alphas.any(|alpha| alpha.is_none()) || xs.any(|x| x.is_none())) {
+        return option::none()
+    };
+
+    option::some(
+        TransferSigmaProof {
+            alphas: TransferSigmaProofAlphas {
+                a1s: alphas.slice(0, 8).map(|alpha| alpha.extract()),
+                a2: alphas[8].extract(),
+                a3s: alphas.slice(9, 13).map(|alpha| alpha.extract()),
+                a4s: alphas.slice(13, 17).map(|alpha| alpha.extract()),
+                a5: alphas[17].extract(),
+                a6s: alphas.slice(18, 26).map(|alpha| alpha.extract()),
+            },
+            xs: TransferSigmaProofXs {
+                x1: xs[0].extract(),
+                x2s: xs.slice(1, 9).map(|x| x.extract()),
+                x3s: xs.slice(9, 13).map(|x| x.extract()),
+                x4s: xs.slice(13, 17).map(|x| x.extract()),
+                x5: xs[17].extract(),
+                x6s: xs.slice(18, 26).map(|x| x.extract()),
+                x7s: vector::range_with_step(26, xs_count - 4, 4).map(|i| {
+                    vector::range(i, i + 4).map(|j| xs[j].extract())
+                }),
+                x8s: xs.slice(xs_count - 4, xs_count).map(|x| x.extract()),
+            },
+        }
+    )
+}
+
+ + + +
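+
+Illustrative sizing arithmetic for the checks above: with two auditors the proof carries 26 scalars and 30 + 2*4 = 38 compressed points, so proof_bytes.length() must be 32*(26 + 30) + 2*128 = 2048 bytes. Then auditor_xs = 2048 - 1792 = 256, which passes the % 128 check, xs_count becomes 30 + 256/32 = 38, and x7s ends up holding two groups of four points, one group per auditor, which is exactly what auditors_count_in_transfer_proof counts.
+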
+ + + +## Function `deserialize_normalization_sigma_proof` + +Deserializes the NormalizationSigmaProof from the byte array. +Returns Some(NormalizationSigmaProof) if the deserialization is successful; otherwise, returns None. + + +
fun deserialize_normalization_sigma_proof(proof_bytes: vector<u8>): option::Option<confidential_proof::NormalizationSigmaProof>
+
+ + + +
+Implementation + + +
fun deserialize_normalization_sigma_proof(proof_bytes: vector<u8>): Option<NormalizationSigmaProof> {
+    let alphas_count = 18;
+    let xs_count = 18;
+
+    if (proof_bytes.length() != 32 * xs_count + 32 * alphas_count) {
+        return option::none()
+    };
+
+    let alphas = vector::range(0, alphas_count).map(|i| {
+        ristretto255::new_scalar_from_bytes(proof_bytes.slice(i * 32, (i + 1) * 32))
+    });
+    let xs = vector::range(alphas_count, alphas_count + xs_count).map(|i| {
+        ristretto255::new_compressed_point_from_bytes(proof_bytes.slice(i * 32, (i + 1) * 32))
+    });
+
+    if (alphas.any(|alpha| alpha.is_none()) || xs.any(|x| x.is_none())) {
+        return option::none()
+    };
+
+    option::some(
+        NormalizationSigmaProof {
+            alphas: NormalizationSigmaProofAlphas {
+                a1s: alphas.slice(0, 8).map(|alpha| alpha.extract()),
+                a2: alphas[8].extract(),
+                a3: alphas[9].extract(),
+                a4s: alphas.slice(10, 18).map(|alpha| alpha.extract()),
+            },
+            xs: NormalizationSigmaProofXs {
+                x1: xs[0].extract(),
+                x2: xs[1].extract(),
+                x3s: xs.slice(2, 10).map(|x| x.extract()),
+                x4s: xs.slice(10, 18).map(|x| x.extract()),
+            },
+        }
+    )
+}
+
+ + + +
+ + + +## Function `deserialize_rotation_sigma_proof` + +Deserializes the RotationSigmaProof from the byte array. +Returns Some(RotationSigmaProof) if the deserialization is successful; otherwise, returns None. + + +
fun deserialize_rotation_sigma_proof(proof_bytes: vector<u8>): option::Option<confidential_proof::RotationSigmaProof>
+
+ + + +
+Implementation + + +
fun deserialize_rotation_sigma_proof(proof_bytes: vector<u8>): Option<RotationSigmaProof> {
+    let alphas_count = 19;
+    let xs_count = 19;
+
+    if (proof_bytes.length() != 32 * xs_count + 32 * alphas_count) {
+        return option::none()
+    };
+
+    let alphas = vector::range(0, alphas_count).map(|i| {
+        ristretto255::new_scalar_from_bytes(proof_bytes.slice(i * 32, (i + 1) * 32))
+    });
+    let xs = vector::range(alphas_count, alphas_count + xs_count).map(|i| {
+        ristretto255::new_compressed_point_from_bytes(proof_bytes.slice(i * 32, (i + 1) * 32))
+    });
+
+    if (alphas.any(|alpha| alpha.is_none()) || xs.any(|x| x.is_none())) {
+        return option::none()
+    };
+
+    option::some(
+        RotationSigmaProof {
+            alphas: RotationSigmaProofAlphas {
+                a1s: alphas.slice(0, 8).map(|alpha| alpha.extract()),
+                a2: alphas[8].extract(),
+                a3: alphas[9].extract(),
+                a4: alphas[10].extract(),
+                a5s: alphas.slice(11, 19).map(|alpha| alpha.extract()),
+            },
+            xs: RotationSigmaProofXs {
+                x1: xs[0].extract(),
+                x2: xs[1].extract(),
+                x3: xs[2].extract(),
+                x4s: xs.slice(3, 11).map(|x| x.extract()),
+                x5s: xs.slice(11, 19).map(|x| x.extract()),
+            },
+        }
+    )
+}
+
+ + + +
+ + + +## Function `get_fiat_shamir_withdrawal_sigma_dst` + +Returns the Fiat Shamir DST for the WithdrawalSigmaProof. + + +
#[view]
+public fun get_fiat_shamir_withdrawal_sigma_dst(): vector<u8>
+
+ + + +
+Implementation + + +
public fun get_fiat_shamir_withdrawal_sigma_dst(): vector<u8> {
+    FIAT_SHAMIR_WITHDRAWAL_SIGMA_DST
+}
+
+ + + +
+ + + +## Function `get_fiat_shamir_transfer_sigma_dst` + +Returns the Fiat Shamir DST for the TransferSigmaProof. + + +
#[view]
+public fun get_fiat_shamir_transfer_sigma_dst(): vector<u8>
+
+ + + +
+Implementation + + +
public fun get_fiat_shamir_transfer_sigma_dst(): vector<u8> {
+    FIAT_SHAMIR_TRANSFER_SIGMA_DST
+}
+
+ + + +
+ + + +## Function `get_fiat_shamir_normalization_sigma_dst` + +Returns the Fiat Shamir DST for the NormalizationSigmaProof. + + +
#[view]
+public fun get_fiat_shamir_normalization_sigma_dst(): vector<u8>
+
+ + + +
+Implementation + + +
public fun get_fiat_shamir_normalization_sigma_dst(): vector<u8> {
+    FIAT_SHAMIR_NORMALIZATION_SIGMA_DST
+}
+
+ + + +
+ + + +## Function `get_fiat_shamir_rotation_sigma_dst` + +Returns the Fiat Shamir DST for the RotationSigmaProof. + + +
#[view]
+public fun get_fiat_shamir_rotation_sigma_dst(): vector<u8>
+
+ + + +
+Implementation + + +
public fun get_fiat_shamir_rotation_sigma_dst(): vector<u8> {
+    FIAT_SHAMIR_ROTATION_SIGMA_DST
+}
+
+ + + +
+ + + +## Function `get_bulletproofs_dst` + +Returns the DST for the range proofs. + + +
#[view]
+public fun get_bulletproofs_dst(): vector<u8>
+
+ + + +
+Implementation + + +
public fun get_bulletproofs_dst(): vector<u8> {
+    BULLETPROOFS_DST
+}
+
+ + + +
+ + + +## Function `get_bulletproofs_num_bits` + +Returns the maximum number of bits of the normalized chunk for the range proofs. + + +
#[view]
+public fun get_bulletproofs_num_bits(): u64
+
+ + + +
+Implementation + + +
public fun get_bulletproofs_num_bits(): u64 {
+    BULLETPROOFS_NUM_BITS
+}
+
+ + + +
+ + + +## Function `fiat_shamir_withdrawal_sigma_proof_challenge` + +Derives the Fiat-Shamir challenge for the WithdrawalSigmaProof. + + +
fun fiat_shamir_withdrawal_sigma_proof_challenge(ek: &ristretto255_twisted_elgamal::CompressedPubkey, amount_chunks: &vector<ristretto255::Scalar>, current_balance: &confidential_balance::ConfidentialBalance, proof_xs: &confidential_proof::WithdrawalSigmaProofXs): ristretto255::Scalar
+
+ + + +
+Implementation + + +
fun fiat_shamir_withdrawal_sigma_proof_challenge(
+    ek: &twisted_elgamal::CompressedPubkey,
+    amount_chunks: &vector<Scalar>,
+    current_balance: &confidential_balance::ConfidentialBalance,
+    proof_xs: &WithdrawalSigmaProofXs): Scalar
+{
+    // rho = H(DST, G, H, P, v_{1..4}, (C_cur, D_cur)_{1..8}, X_{1..18})
+    let bytes = FIAT_SHAMIR_WITHDRAWAL_SIGMA_DST;
+
+    bytes.append(ristretto255::compressed_point_to_bytes(ristretto255::basepoint_compressed()));
+    bytes.append(
+        ristretto255::compressed_point_to_bytes(ristretto255::point_compress(&ristretto255::hash_to_point_base()))
+    );
+    bytes.append(twisted_elgamal::pubkey_to_bytes(ek));
+    amount_chunks.for_each_ref(|chunk| {
+        bytes.append(ristretto255::scalar_to_bytes(chunk));
+    });
+    bytes.append(confidential_balance::balance_to_bytes(current_balance));
+    bytes.append(ristretto255::point_to_bytes(&proof_xs.x1));
+    bytes.append(ristretto255::point_to_bytes(&proof_xs.x2));
+    proof_xs.x3s.for_each_ref(|x| {
+        bytes.append(ristretto255::point_to_bytes(x));
+    });
+    proof_xs.x4s.for_each_ref(|x| {
+        bytes.append(ristretto255::point_to_bytes(x));
+    });
+
+    ristretto255::new_scalar_from_sha2_512(bytes)
+}
+
+ + + +
+ + + +## Function `fiat_shamir_transfer_sigma_proof_challenge` + +Derives the Fiat-Shamir challenge for the TransferSigmaProof. + + +
fun fiat_shamir_transfer_sigma_proof_challenge(sender_ek: &ristretto255_twisted_elgamal::CompressedPubkey, recipient_ek: &ristretto255_twisted_elgamal::CompressedPubkey, current_balance: &confidential_balance::ConfidentialBalance, new_balance: &confidential_balance::ConfidentialBalance, sender_amount: &confidential_balance::ConfidentialBalance, recipient_amount: &confidential_balance::ConfidentialBalance, auditor_eks: &vector<ristretto255_twisted_elgamal::CompressedPubkey>, auditor_amounts: &vector<confidential_balance::ConfidentialBalance>, proof_xs: &confidential_proof::TransferSigmaProofXs): ristretto255::Scalar
+
+ + + +
+Implementation + + +
fun fiat_shamir_transfer_sigma_proof_challenge(
+    sender_ek: &twisted_elgamal::CompressedPubkey,
+    recipient_ek: &twisted_elgamal::CompressedPubkey,
+    current_balance: &confidential_balance::ConfidentialBalance,
+    new_balance: &confidential_balance::ConfidentialBalance,
+    sender_amount: &confidential_balance::ConfidentialBalance,
+    recipient_amount: &confidential_balance::ConfidentialBalance,
+    auditor_eks: &vector<twisted_elgamal::CompressedPubkey>,
+    auditor_amounts: &vector<confidential_balance::ConfidentialBalance>,
+    proof_xs: &TransferSigmaProofXs): Scalar
+{
+    // rho = H(DST, G, H, P_s, P_r, P_a_{1..n}, (C_cur, D_cur)_{1..8}, (C_v, D_v)_{1..4}, D_a_{1..4n}, D_s_{1..4}, (C_new, D_new)_{1..8}, X_{1..30 + 4n})
+    let bytes = FIAT_SHAMIR_TRANSFER_SIGMA_DST;
+
+    bytes.append(ristretto255::compressed_point_to_bytes(ristretto255::basepoint_compressed()));
+    bytes.append(
+        ristretto255::compressed_point_to_bytes(ristretto255::point_compress(&ristretto255::hash_to_point_base()))
+    );
+    bytes.append(twisted_elgamal::pubkey_to_bytes(sender_ek));
+    bytes.append(twisted_elgamal::pubkey_to_bytes(recipient_ek));
+    auditor_eks.for_each_ref(|ek| {
+        bytes.append(twisted_elgamal::pubkey_to_bytes(ek));
+    });
+    bytes.append(confidential_balance::balance_to_bytes(current_balance));
+    bytes.append(confidential_balance::balance_to_bytes(recipient_amount));
+    auditor_amounts.for_each_ref(|balance| {
+        confidential_balance::balance_to_points_d(balance).for_each_ref(|d| {
+            bytes.append(ristretto255::compressed_point_to_bytes(ristretto255::point_compress(d)));
+        });
+    });
+    confidential_balance::balance_to_points_d(sender_amount).for_each_ref(|d| {
+        bytes.append(ristretto255::compressed_point_to_bytes(ristretto255::point_compress(d)));
+    });
+    bytes.append(confidential_balance::balance_to_bytes(new_balance));
+    bytes.append(ristretto255::point_to_bytes(&proof_xs.x1));
+    proof_xs.x2s.for_each_ref(|x| {
+        bytes.append(ristretto255::point_to_bytes(x));
+    });
+    proof_xs.x3s.for_each_ref(|x| {
+        bytes.append(ristretto255::point_to_bytes(x));
+    });
+    proof_xs.x4s.for_each_ref(|x| {
+        bytes.append(ristretto255::point_to_bytes(x));
+    });
+    bytes.append(ristretto255::point_to_bytes(&proof_xs.x5));
+    proof_xs.x6s.for_each_ref(|x| {
+        bytes.append(ristretto255::point_to_bytes(x));
+    });
+    proof_xs.x7s.for_each_ref(|xs| {
+        xs.for_each_ref(|x| {
+            bytes.append(ristretto255::point_to_bytes(x));
+        });
+    });
+    proof_xs.x8s.for_each_ref(|x| {
+        bytes.append(ristretto255::point_to_bytes(x));
+    });
+
+    ristretto255::new_scalar_from_sha2_512(bytes)
+}
+
+ + + +
+ + + +## Function `fiat_shamir_normalization_sigma_proof_challenge` + +Derives the Fiat-Shamir challenge for the NormalizationSigmaProof. + + +
fun fiat_shamir_normalization_sigma_proof_challenge(ek: &ristretto255_twisted_elgamal::CompressedPubkey, current_balance: &confidential_balance::ConfidentialBalance, new_balance: &confidential_balance::ConfidentialBalance, proof_xs: &confidential_proof::NormalizationSigmaProofXs): ristretto255::Scalar
+
+ + + +
+Implementation + + +
fun fiat_shamir_normalization_sigma_proof_challenge(
+    ek: &twisted_elgamal::CompressedPubkey,
+    current_balance: &confidential_balance::ConfidentialBalance,
+    new_balance: &confidential_balance::ConfidentialBalance,
+    proof_xs: &NormalizationSigmaProofXs): Scalar
+{
+    // rho = H(DST, G, H, P, (C_cur, D_cur)_{1..8}, (C_new, D_new)_{1..8}, X_{1..18})
+    let bytes = FIAT_SHAMIR_NORMALIZATION_SIGMA_DST;
+
+    bytes.append(ristretto255::compressed_point_to_bytes(ristretto255::basepoint_compressed()));
+    bytes.append(
+        ristretto255::compressed_point_to_bytes(ristretto255::point_compress(&ristretto255::hash_to_point_base()))
+    );
+    bytes.append(twisted_elgamal::pubkey_to_bytes(ek));
+    bytes.append(confidential_balance::balance_to_bytes(current_balance));
+    bytes.append(confidential_balance::balance_to_bytes(new_balance));
+    bytes.append(ristretto255::point_to_bytes(&proof_xs.x1));
+    bytes.append(ristretto255::point_to_bytes(&proof_xs.x2));
+    proof_xs.x3s.for_each_ref(|x| {
+        bytes.append(ristretto255::point_to_bytes(x));
+    });
+    proof_xs.x4s.for_each_ref(|x| {
+        bytes.append(ristretto255::point_to_bytes(x));
+    });
+
+    ristretto255::new_scalar_from_sha2_512(bytes)
+}
+
+ + + +
+ + + +## Function `fiat_shamir_rotation_sigma_proof_challenge` + +Derives the Fiat-Shamir challenge for the RotationSigmaProof. + + +
fun fiat_shamir_rotation_sigma_proof_challenge(current_ek: &ristretto255_twisted_elgamal::CompressedPubkey, new_ek: &ristretto255_twisted_elgamal::CompressedPubkey, current_balance: &confidential_balance::ConfidentialBalance, new_balance: &confidential_balance::ConfidentialBalance, proof_xs: &confidential_proof::RotationSigmaProofXs): ristretto255::Scalar
+
+ + + +
+Implementation + + +
fun fiat_shamir_rotation_sigma_proof_challenge(
+    current_ek: &twisted_elgamal::CompressedPubkey,
+    new_ek: &twisted_elgamal::CompressedPubkey,
+    current_balance: &confidential_balance::ConfidentialBalance,
+    new_balance: &confidential_balance::ConfidentialBalance,
+    proof_xs: &RotationSigmaProofXs): Scalar
+{
+    // rho = H(DST, G, H, P_cur, P_new, (C_cur, D_cur)_{1..8}, (C_new, D_new)_{1..8}, X_{1..19})
+    let bytes = FIAT_SHAMIR_ROTATION_SIGMA_DST;
+
+    bytes.append(ristretto255::compressed_point_to_bytes(ristretto255::basepoint_compressed()));
+    bytes.append(
+        ristretto255::compressed_point_to_bytes(ristretto255::point_compress(&ristretto255::hash_to_point_base()))
+    );
+    bytes.append(twisted_elgamal::pubkey_to_bytes(current_ek));
+    bytes.append(twisted_elgamal::pubkey_to_bytes(new_ek));
+    bytes.append(confidential_balance::balance_to_bytes(current_balance));
+    bytes.append(confidential_balance::balance_to_bytes(new_balance));
+    bytes.append(ristretto255::point_to_bytes(&proof_xs.x1));
+    bytes.append(ristretto255::point_to_bytes(&proof_xs.x2));
+    bytes.append(ristretto255::point_to_bytes(&proof_xs.x3));
+    proof_xs.x4s.for_each_ref(|x| {
+        bytes.append(ristretto255::point_to_bytes(x));
+    });
+    proof_xs.x5s.for_each_ref(|x| {
+        bytes.append(ristretto255::point_to_bytes(x));
+    });
+
+    ristretto255::new_scalar_from_sha2_512(bytes)
+}
+
+ + + +
+ + + +## Function `msm_withdrawal_gammas` + +Returns the scalar multipliers for the WithdrawalSigmaProof. + + +
fun msm_withdrawal_gammas(rho: &ristretto255::Scalar): confidential_proof::WithdrawalSigmaProofGammas
+
+ + + +
+Implementation + + +
fun msm_withdrawal_gammas(rho: &Scalar): WithdrawalSigmaProofGammas {
+    WithdrawalSigmaProofGammas {
+        g1: ristretto255::new_scalar_from_sha2_512(msm_gamma_1(rho, 1)),
+        g2: ristretto255::new_scalar_from_sha2_512(msm_gamma_1(rho, 2)),
+        g3s: vector::range(0, 8).map(|i| {
+            ristretto255::new_scalar_from_sha2_512(msm_gamma_2(rho, 3, (i as u8)))
+        }),
+        g4s: vector::range(0, 8).map(|i| {
+            ristretto255::new_scalar_from_sha2_512(msm_gamma_2(rho, 4, (i as u8)))
+        }),
+    }
+}
+
+ + + +
+ + + +## Function `msm_transfer_gammas` + +Returns the scalar multipliers for the TransferSigmaProof. + + +
fun msm_transfer_gammas(rho: &ristretto255::Scalar, auditors_count: u64): confidential_proof::TransferSigmaProofGammas
+
+ + + +
+Implementation + + +
fun msm_transfer_gammas(rho: &Scalar, auditors_count: u64): TransferSigmaProofGammas {
+    TransferSigmaProofGammas {
+        g1: ristretto255::new_scalar_from_sha2_512(msm_gamma_1(rho, 1)),
+        g2s: vector::range(0, 8).map(|i| {
+            ristretto255::new_scalar_from_sha2_512(msm_gamma_2(rho, 2, (i as u8)))
+        }),
+        g3s: vector::range(0, 4).map(|i| {
+            ristretto255::new_scalar_from_sha2_512(msm_gamma_2(rho, 3, (i as u8)))
+        }),
+        g4s: vector::range(0, 4).map(|i| {
+            ristretto255::new_scalar_from_sha2_512(msm_gamma_2(rho, 4, (i as u8)))
+        }),
+        g5: ristretto255::new_scalar_from_sha2_512(msm_gamma_1(rho, 5)),
+        g6s: vector::range(0, 8).map(|i| {
+            ristretto255::new_scalar_from_sha2_512(msm_gamma_2(rho, 6, (i as u8)))
+        }),
+        g7s: vector::range(0, auditors_count).map(|i| {
+            vector::range(0, 4).map(|j| {
+                ristretto255::new_scalar_from_sha2_512(msm_gamma_2(rho, (i + 7 as u8), (j as u8)))
+            })
+        }),
+        g8s: vector::range(0, 4).map(|i| {
+            ristretto255::new_scalar_from_sha2_512(msm_gamma_2(rho, 8, (i as u8)))
+        }),
+    }
+}
+
+ + + +
+ + + +## Function `msm_normalization_gammas` + +Returns the scalar multipliers for the NormalizationSigmaProof. + + +
fun msm_normalization_gammas(rho: &ristretto255::Scalar): confidential_proof::NormalizationSigmaProofGammas
+
+ + + +
+Implementation + + +
fun msm_normalization_gammas(rho: &Scalar): NormalizationSigmaProofGammas {
+    NormalizationSigmaProofGammas {
+        g1: ristretto255::new_scalar_from_sha2_512(msm_gamma_1(rho, 1)),
+        g2: ristretto255::new_scalar_from_sha2_512(msm_gamma_1(rho, 2)),
+        g3s: vector::range(0, 8).map(|i| {
+            ristretto255::new_scalar_from_sha2_512(msm_gamma_2(rho, 3, (i as u8)))
+        }),
+        g4s: vector::range(0, 8).map(|i| {
+            ristretto255::new_scalar_from_sha2_512(msm_gamma_2(rho, 4, (i as u8)))
+        }),
+    }
+}
+
+ + + +
+ + + +## Function `msm_rotation_gammas` + +Returns the scalar multipliers for the RotationSigmaProof. + + +
fun msm_rotation_gammas(rho: &ristretto255::Scalar): confidential_proof::RotationSigmaProofGammas
+
+ + + +
+Implementation + + +
fun msm_rotation_gammas(rho: &Scalar): RotationSigmaProofGammas {
+    RotationSigmaProofGammas {
+        g1: ristretto255::new_scalar_from_sha2_512(msm_gamma_1(rho, 1)),
+        g2: ristretto255::new_scalar_from_sha2_512(msm_gamma_1(rho, 2)),
+        g3: ristretto255::new_scalar_from_sha2_512(msm_gamma_1(rho, 3)),
+        g4s: vector::range(0, 8).map(|i| {
+            ristretto255::new_scalar_from_sha2_512(msm_gamma_2(rho, 4, (i as u8)))
+        }),
+        g5s: vector::range(0, 8).map(|i| {
+            ristretto255::new_scalar_from_sha2_512(msm_gamma_2(rho, 5, (i as u8)))
+        }),
+    }
+}
+
+ + + +
+ + + +## Function `msm_gamma_1` + +Returns the scalar multiplier computed as a hash of the provided rho and corresponding gamma index. + + +
fun msm_gamma_1(rho: &ristretto255::Scalar, i: u8): vector<u8>
+
+ + + +
+Implementation + + +
fun msm_gamma_1(rho: &Scalar, i: u8): vector<u8> {
+    let bytes = ristretto255::scalar_to_bytes(rho);
+    bytes.push_back(i);
+    bytes
+}
+
+ + + +
+ + + +## Function `msm_gamma_2` + +Returns the scalar multiplier computed as a hash of the provided rho and corresponding gamma indices. + + +
fun msm_gamma_2(rho: &ristretto255::Scalar, i: u8, j: u8): vector<u8>
+
+ + + +
+Implementation + + +
fun msm_gamma_2(rho: &Scalar, i: u8, j: u8): vector<u8> {
+    let bytes = ristretto255::scalar_to_bytes(rho);
+    bytes.push_back(i);
+    bytes.push_back(j);
+    bytes
+}
+
+ + + +
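+
+An illustrative note on how these helpers are used: msm_gamma_2(rho, 3, 5), for example, is just scalar_to_bytes(rho) with the bytes 0x03 and 0x05 appended, and the msm_*_gammas functions above hash each such byte string into a scalar with new_scalar_from_sha2_512. Those scalars act as independent pseudo-random weights, which lets each sigma-proof verification fold all of its relations into the single multi_scalar_mul comparison per side shown earlier.
+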
+ + + +## Function `scalar_mul_3` + +Calculates the product of the provided scalars. + + +
fun scalar_mul_3(scalar1: &ristretto255::Scalar, scalar2: &ristretto255::Scalar, scalar3: &ristretto255::Scalar): ristretto255::Scalar
+
+ + + +
+Implementation + + +
fun scalar_mul_3(scalar1: &Scalar, scalar2: &Scalar, scalar3: &Scalar): Scalar {
+    let result = *scalar1;
+
+    ristretto255::scalar_mul_assign(&mut result, scalar2);
+    ristretto255::scalar_mul_assign(&mut result, scalar3);
+
+    result
+}
+
+ + + +
+ + + +## Function `scalar_linear_combination` + +Calculates the linear combination of the provided scalars. + + +
fun scalar_linear_combination(lhs: &vector<ristretto255::Scalar>, rhs: &vector<ristretto255::Scalar>): ristretto255::Scalar
+
+ + + +
+Implementation + + +
fun scalar_linear_combination(lhs: &vector<Scalar>, rhs: &vector<Scalar>): Scalar {
+    let result = ristretto255::scalar_zero();
+
+    lhs.zip_ref(rhs, |l, r| {
+        ristretto255::scalar_add_assign(&mut result, &ristretto255::scalar_mul(l, r));
+    });
+
+    result
+}
+
+ + + +
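+
+A test-style sketch (assumed to sit inside this module, since the helper is private) showing that the function computes the dot product of the two scalar vectors, here 2*5 + 3*7 = 31:
+
+#[test]
+fun scalar_linear_combination_example() {
+    let lhs = vector[ristretto255::new_scalar_from_u64(2), ristretto255::new_scalar_from_u64(3)];
+    let rhs = vector[ristretto255::new_scalar_from_u64(5), ristretto255::new_scalar_from_u64(7)];
+    let expected = ristretto255::new_scalar_from_u64(31);
+    assert!(ristretto255::scalar_equals(&scalar_linear_combination(&lhs, &rhs), &expected), 1);
+}
+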
+ + + +## Function `new_scalar_from_pow2` + +Raises 2 to the power of the provided exponent and returns the result as a scalar. + + +
fun new_scalar_from_pow2(exp: u64): ristretto255::Scalar
+
+ + + +
+Implementation + + +
fun new_scalar_from_pow2(exp: u64): Scalar {
+    ristretto255::new_scalar_from_u128(1 << (exp as u8))
+}
+
+ + + +
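+
+A test-style sketch (assumed to sit inside this module, since the helper is private): 2^16 as a scalar. Note that the shift is performed on a u128 literal, so exponents of 128 or more would abort:
+
+#[test]
+fun new_scalar_from_pow2_example() {
+    let expected = ristretto255::new_scalar_from_u64(65536);
+    assert!(ristretto255::scalar_equals(&new_scalar_from_pow2(16), &expected), 1);
+}
+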
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/aptos-experimental/doc/helpers.md b/aptos-move/framework/aptos-experimental/doc/helpers.md new file mode 100644 index 0000000000000..247056fb954dd --- /dev/null +++ b/aptos-move/framework/aptos-experimental/doc/helpers.md @@ -0,0 +1,126 @@ + + + +# Module `0x7::helpers` + + + +- [Constants](#@Constants_0) +- [Function `cut_vector`](#0x7_helpers_cut_vector) +- [Function `get_veiled_balance_zero_ciphertext`](#0x7_helpers_get_veiled_balance_zero_ciphertext) +- [Function `public_amount_to_veiled_balance`](#0x7_helpers_public_amount_to_veiled_balance) + + +
use 0x1::error;
+use 0x1::ristretto255;
+use 0x1::ristretto255_elgamal;
+use 0x1::vector;
+
+ + + + + +## Constants + + + + +Tried cutting out more elements than are in the vector via cut_vector. + + +
const EVECTOR_CUT_TOO_LARGE: u64 = 1;
+
+ + + + + +## Function `cut_vector` + +Given a vector vec, removes the last cut_len elements of vec and returns them in order. (This function +exists because we did not like the interface of std::vector::trim.) + + +
public fun cut_vector<T>(vec: &mut vector<T>, cut_len: u64): vector<T>
+
+ + + +
+Implementation + + +
public fun cut_vector<T>(vec: &mut vector<T>, cut_len: u64): vector<T> {
+    let len = vector::length(vec);
+    let res = vector::empty();
+    assert!(len >= cut_len, error::out_of_range(EVECTOR_CUT_TOO_LARGE));
+    while (cut_len > 0) {
+        res.push_back(vector::pop_back(vec));
+        cut_len -= 1;
+    };
+    res.reverse();
+    res
+}
+
+ + + +
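+
+A test-style sketch (assumed to live inside this module; cut_vector is public, so external callers would qualify it): cutting the last two elements preserves their original order:
+
+#[test]
+fun cut_vector_example() {
+    let v = vector[1, 2, 3, 4, 5];
+    let tail = cut_vector(&mut v, 2);
+    assert!(tail == vector[4, 5], 1);
+    assert!(v == vector[1, 2, 3], 2);
+}
+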
+ + + +## Function `get_veiled_balance_zero_ciphertext` + +Returns an encryption of zero, without any randomness (i.e., $r=0$), under any ElGamal PK. + + +
public fun get_veiled_balance_zero_ciphertext(): ristretto255_elgamal::CompressedCiphertext
+
+ + + +
+Implementation + + +
public fun get_veiled_balance_zero_ciphertext(): elgamal::CompressedCiphertext {
+    elgamal::ciphertext_from_compressed_points(
+        ristretto255::point_identity_compressed(), ristretto255::point_identity_compressed())
+}
+
+ + + +
+ + + +## Function `public_amount_to_veiled_balance` + +Returns an encryption of amount, without any randomness (i.e., $r=0$), under any ElGamal PK. +WARNING: This is not a proper ciphertext: the value amount can be easily bruteforced. + + +
public fun public_amount_to_veiled_balance(amount: u32): ristretto255_elgamal::Ciphertext
+
+ + + +
+Implementation + + +
public fun public_amount_to_veiled_balance(amount: u32): elgamal::Ciphertext {
+    let scalar = ristretto255::new_scalar_from_u32(amount);
+
+    elgamal::new_ciphertext_no_randomness(&scalar)
+}
+
+ + + +
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/aptos-experimental/doc/large_packages.md b/aptos-move/framework/aptos-experimental/doc/large_packages.md new file mode 100644 index 0000000000000..6e744c09c080a --- /dev/null +++ b/aptos-move/framework/aptos-experimental/doc/large_packages.md @@ -0,0 +1,496 @@ + + + +# Module `0x7::large_packages` + + + + +## Aptos Large Packages Framework + + +This module provides a framework for uploading large packages to the Aptos network, under standard +accounts or objects. +To publish using this API, you must divide your metadata and modules across multiple calls +into large_packages::stage_code_chunk. +In each pass, the caller pushes more code by calling stage_code_chunk. +In the final call, the caller can use stage_code_chunk_and_publish_to_account, stage_code_chunk_and_publish_to_object, or +stage_code_chunk_and_upgrade_object_code to upload the final data chunk and publish or upgrade the package on-chain. + +The above logic is currently implemented in the Python +SDK: [aptos-python-sdk](https://github.com/aptos-labs/aptos-python-sdk/blob/main/aptos_sdk/package_publisher.py). + +Aptos CLI supports this as well with --chunked-publish flag: +- aptos move publish [OPTIONS] --chunked-publish +- aptos move create-object-and-publish-package [OPTIONS] --address-name <ADDRESS_NAME> --chunked-publish +- aptos move upgrade-object-package [OPTIONS] --address-name <ADDRESS_NAME> --chunked-publish + + + + +## Usage + + +1. **Stage Code Chunks**: +- Call stage_code_chunk with the appropriate metadata and code chunks. +- Ensure that code_indices are provided from 0 to last_module_idx, without any +gaps. + + +2. **Publish or Upgrade**: +- In order to upload the last data chunk and publish the package, call stage_code_chunk_and_publish_to_account or stage_code_chunk_and_publish_to_object. + +- For object code upgrades, call stage_code_chunk_and_upgrade_object_code with the argument code_object provided. + +3. **Cleanup**: +- In order to remove StagingArea resource from an account, call cleanup_staging_area. + + + + +## Notes + + +* Make sure LargePackages is deployed to your network of choice, you can currently find it both on +mainnet and testnet at 0xa29df848eebfe5d981f708c2a5b06d31af2be53bbd8ddc94c8523f4b903f7adb, and +in 0x7 (aptos-experimental) on devnet/localnet. +* Ensure that code_indices have no gaps. For example, if code_indices are +provided as [0, 1, 3] (skipping index 2), the inline function assemble_module_code will abort +since StagingArea.last_module_idx is set as the max value of the provided index +from code_indices, and assemble_module_code will lookup the StagingArea.code SmartTable from +0 to StagingArea.last_module_idx in turn. 
+ + +- [Aptos Large Packages Framework](#@Aptos_Large_Packages_Framework_0) +- [Usage](#@Usage_1) +- [Notes](#@Notes_2) +- [Resource `StagingArea`](#0x7_large_packages_StagingArea) +- [Constants](#@Constants_3) +- [Function `stage_code_chunk`](#0x7_large_packages_stage_code_chunk) +- [Function `stage_code_chunk_and_publish_to_account`](#0x7_large_packages_stage_code_chunk_and_publish_to_account) +- [Function `stage_code_chunk_and_publish_to_object`](#0x7_large_packages_stage_code_chunk_and_publish_to_object) +- [Function `stage_code_chunk_and_upgrade_object_code`](#0x7_large_packages_stage_code_chunk_and_upgrade_object_code) +- [Function `stage_code_chunk_internal`](#0x7_large_packages_stage_code_chunk_internal) +- [Function `publish_to_account`](#0x7_large_packages_publish_to_account) +- [Function `publish_to_object`](#0x7_large_packages_publish_to_object) +- [Function `upgrade_object_code`](#0x7_large_packages_upgrade_object_code) +- [Function `assemble_module_code`](#0x7_large_packages_assemble_module_code) +- [Function `cleanup_staging_area`](#0x7_large_packages_cleanup_staging_area) + + +
use 0x1::code;
+use 0x1::error;
+use 0x1::object;
+use 0x1::object_code_deployment;
+use 0x1::signer;
+use 0x1::smart_table;
+use 0x1::vector;
+
+ + + + + +## Resource `StagingArea` + + + +
struct StagingArea has key
+
+ + + +
+Fields + + +
+
+metadata_serialized: vector<u8> +
+
+ +
+
+code: smart_table::SmartTable<u64, vector<u8>> +
+
+ +
+
+last_module_idx: u64 +
+
+ +
+
+ + +
+ + + +## Constants + + + + +code_indices and code_chunks should be the same length. + + +
const ECODE_MISMATCH: u64 = 1;
+
+ + + + + +Object reference should be provided when upgrading object code. + + +
const EMISSING_OBJECT_REFERENCE: u64 = 2;
+
+ + + + + +## Function `stage_code_chunk` + + + +
public entry fun stage_code_chunk(owner: &signer, metadata_chunk: vector<u8>, code_indices: vector<u16>, code_chunks: vector<vector<u8>>)
+
+ + + +
+Implementation + + +
public entry fun stage_code_chunk(
+    owner: &signer,
+    metadata_chunk: vector<u8>,
+    code_indices: vector<u16>,
+    code_chunks: vector<vector<u8>>,
+) acquires StagingArea {
+    stage_code_chunk_internal(owner, metadata_chunk, code_indices, code_chunks);
+}
+
+ + + +
+ + + +## Function `stage_code_chunk_and_publish_to_account` + + + +
public entry fun stage_code_chunk_and_publish_to_account(owner: &signer, metadata_chunk: vector<u8>, code_indices: vector<u16>, code_chunks: vector<vector<u8>>)
+
+ + + +
+Implementation + + +
public entry fun stage_code_chunk_and_publish_to_account(
+    owner: &signer,
+    metadata_chunk: vector<u8>,
+    code_indices: vector<u16>,
+    code_chunks: vector<vector<u8>>,
+) acquires StagingArea {
+    let staging_area = stage_code_chunk_internal(owner, metadata_chunk, code_indices, code_chunks);
+    publish_to_account(owner, staging_area);
+    cleanup_staging_area(owner);
+}
+
+ + + +
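+
+The staging flow is normally driven off-chain (the Python SDK or the CLI's --chunked-publish mode), but the resulting on-chain call sequence can be sketched as follows. The byte values and the wrapper function are purely illustrative, a use of 0x7::large_packages is assumed, and in practice each stage_code_chunk call is submitted as its own transaction:
+
+fun example_chunked_publish(owner: &signer) {
+    // Earlier transactions: push metadata and module chunks; an index may repeat
+    // to append more bytes to a module that was split across chunks.
+    large_packages::stage_code_chunk(owner, b"metadata-part-1", vector[0u16, 1u16], vector[b"mod0", b"mod1-part-1"]);
+    large_packages::stage_code_chunk(owner, b"metadata-part-2", vector[1u16], vector[b"mod1-part-2"]);
+    // Final transaction: stage the last chunk, assemble and publish the package
+    // under the owner's account, then destroy the StagingArea.
+    large_packages::stage_code_chunk_and_publish_to_account(owner, b"", vector[2u16], vector[b"mod2"]);
+}
+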
+ + + +## Function `stage_code_chunk_and_publish_to_object` + + + +
public entry fun stage_code_chunk_and_publish_to_object(owner: &signer, metadata_chunk: vector<u8>, code_indices: vector<u16>, code_chunks: vector<vector<u8>>)
+
+ + + +
+Implementation + + +
public entry fun stage_code_chunk_and_publish_to_object(
+    owner: &signer,
+    metadata_chunk: vector<u8>,
+    code_indices: vector<u16>,
+    code_chunks: vector<vector<u8>>,
+) acquires StagingArea {
+    let staging_area = stage_code_chunk_internal(owner, metadata_chunk, code_indices, code_chunks);
+    publish_to_object(owner, staging_area);
+    cleanup_staging_area(owner);
+}
+
+ + + +
+ + + +## Function `stage_code_chunk_and_upgrade_object_code` + + + +
public entry fun stage_code_chunk_and_upgrade_object_code(owner: &signer, metadata_chunk: vector<u8>, code_indices: vector<u16>, code_chunks: vector<vector<u8>>, code_object: object::Object<code::PackageRegistry>)
+
+ + + +
+Implementation + + +
public entry fun stage_code_chunk_and_upgrade_object_code(
+    owner: &signer,
+    metadata_chunk: vector<u8>,
+    code_indices: vector<u16>,
+    code_chunks: vector<vector<u8>>,
+    code_object: Object<PackageRegistry>,
+) acquires StagingArea {
+    let staging_area = stage_code_chunk_internal(owner, metadata_chunk, code_indices, code_chunks);
+    upgrade_object_code(owner, staging_area, code_object);
+    cleanup_staging_area(owner);
+}
+
+ + + +
+ + + +## Function `stage_code_chunk_internal` + + + +
fun stage_code_chunk_internal(owner: &signer, metadata_chunk: vector<u8>, code_indices: vector<u16>, code_chunks: vector<vector<u8>>): &mut large_packages::StagingArea
+
+ + + +
+Implementation + + +
inline fun stage_code_chunk_internal(
+    owner: &signer,
+    metadata_chunk: vector<u8>,
+    code_indices: vector<u16>,
+    code_chunks: vector<vector<u8>>,
+): &mut StagingArea acquires StagingArea {
+    assert!(
+        vector::length(&code_indices) == vector::length(&code_chunks),
+        error::invalid_argument(ECODE_MISMATCH),
+    );
+
+    let owner_address = signer::address_of(owner);
+
+    if (!exists<StagingArea>(owner_address)) {
+        move_to(owner, StagingArea {
+            metadata_serialized: vector[],
+            code: smart_table::new(),
+            last_module_idx: 0,
+        });
+    };
+
+    let staging_area = borrow_global_mut<StagingArea>(owner_address);
+
+    if (!vector::is_empty(&metadata_chunk)) {
+        vector::append(&mut staging_area.metadata_serialized, metadata_chunk);
+    };
+
+    let i = 0;
+    while (i < vector::length(&code_chunks)) {
+        let inner_code = *vector::borrow(&code_chunks, i);
+        let idx = (*vector::borrow(&code_indices, i) as u64);
+
+        if (smart_table::contains(&staging_area.code, idx)) {
+            vector::append(smart_table::borrow_mut(&mut staging_area.code, idx), inner_code);
+        } else {
+            smart_table::add(&mut staging_area.code, idx, inner_code);
+            if (idx > staging_area.last_module_idx) {
+                staging_area.last_module_idx = idx;
+            }
+        };
+        i = i + 1;
+    };
+
+    staging_area
+}
+
+ + + +
+ + + +## Function `publish_to_account` + + + +
fun publish_to_account(publisher: &signer, staging_area: &mut large_packages::StagingArea)
+
+ + + +
+Implementation + + +
inline fun publish_to_account(
+    publisher: &signer,
+    staging_area: &mut StagingArea,
+) {
+    let code = assemble_module_code(staging_area);
+    code::publish_package_txn(publisher, staging_area.metadata_serialized, code);
+}
+
+ + + +
+ + + +## Function `publish_to_object` + + + +
fun publish_to_object(publisher: &signer, staging_area: &mut large_packages::StagingArea)
+
+ + + +
+Implementation + + +
inline fun publish_to_object(
+    publisher: &signer,
+    staging_area: &mut StagingArea,
+) {
+    let code = assemble_module_code(staging_area);
+    object_code_deployment::publish(publisher, staging_area.metadata_serialized, code);
+}
+
+ + + +
+ + + +## Function `upgrade_object_code` + + + +
fun upgrade_object_code(publisher: &signer, staging_area: &mut large_packages::StagingArea, code_object: object::Object<code::PackageRegistry>)
+
+ + + +
+Implementation + + +
inline fun upgrade_object_code(
+    publisher: &signer,
+    staging_area: &mut StagingArea,
+    code_object: Object<PackageRegistry>,
+) {
+    let code = assemble_module_code(staging_area);
+    object_code_deployment::upgrade(publisher, staging_area.metadata_serialized, code, code_object);
+}
+
+ + + +
+ + + +## Function `assemble_module_code` + + + +
fun assemble_module_code(staging_area: &mut large_packages::StagingArea): vector<vector<u8>>
+
+ + + +
+Implementation + + +
inline fun assemble_module_code(
+    staging_area: &mut StagingArea,
+): vector<vector<u8>> {
+    let last_module_idx = staging_area.last_module_idx;
+    let code = vector[];
+    let i = 0;
+    while (i <= last_module_idx) {
+        vector::push_back(
+            &mut code,
+            *smart_table::borrow(&staging_area.code, i)
+        );
+        i = i + 1;
+    };
+    code
+}
+
+ + + +
+ + + +## Function `cleanup_staging_area` + + + +
public entry fun cleanup_staging_area(owner: &signer)
+
+ + + +
+Implementation + + +
public entry fun cleanup_staging_area(owner: &signer) acquires StagingArea {
+    let StagingArea {
+        metadata_serialized: _,
+        code,
+        last_module_idx: _,
+    } = move_from<StagingArea>(signer::address_of(owner));
+    smart_table::destroy(code);
+}
+
+ + + +
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/aptos-experimental/doc/market.md b/aptos-move/framework/aptos-experimental/doc/market.md new file mode 100644 index 0000000000000..04ee9fb20effd --- /dev/null +++ b/aptos-move/framework/aptos-experimental/doc/market.md @@ -0,0 +1,2205 @@ + + + +# Module `0x7::market` + +This module provides a generic trading engine implementation for a market. On a high level, it's a data structure +that stores an order book and provides APIs to place orders, cancel orders, and match orders. The market also acts +as a wrapper around the order book and a pluggable clearinghouse implementation. +A clearinghouse implementation is expected to implement the following APIs: +- settle_trade(taker, maker, taker_order_id, maker_order_id, fill_id, is_taker_long, price, size): SettleTradeResult -> +Called by the market when there is a match between taker and maker. The clearinghouse is expected to settle the trade +and return the result. Please note that the clearinghouse settlement size might not be the same as the order match size and +the settlement might also fail. The fill_id is an incremental counter for matched orders and can be used to track specific fills. +- validate_order_placement(account, is_taker, is_long, price, size): bool -> Called by the market to validate +an order when it is placed. The clearinghouse is expected to validate the order and return true if the order is valid. +Check out clearinghouse_test as an example of the simplest form of clearinghouse implementation that just tracks +the position size of the user and does not do any validation. + +- place_maker_order(account, order_id, is_bid, price, size, metadata) -> Called by the market before placing the +maker order in the order book. The clearinghouse can use this to track pending orders in the order book and perform +any other bookkeeping operations. + +- cleanup_order(account, order_id, is_bid, remaining_size) -> Called by the market when an order is cancelled or fully filled. +The clearinghouse can perform any cleanup operations like removing the order from the pending orders list. For every order placement +that passes the validate_order_placement check, +the market guarantees that the cleanup_order API will be called once and only once with the remaining size of the order. + +- decrease_order_size(account, order_id, is_bid, price, size) -> Called by the market when a maker order is decreased +in size by the user. Please note that this API will only be called after place_maker_order is called and the order is +already in the order book. Size in this case is the remaining size of the order after the decrease. + +Following are some valid sequences of API calls that the market makes to the clearinghouse: +1. validate_order_placement(10) +2. settle_trade(2) +3. settle_trade(3) +4. place_maker_order(5) +5. decrease_order_size(2) +6. decrease_order_size(1) +7. cleanup_order(2) +or +1. validate_order_placement(10) +2. cleanup_order(10) + +Upon placement of an order, the market generates an order id and emits an event with the order details - the order id +is a unique id for the order that can be used to later get the status of the order or cancel the order. + +Market also supports various conditions for order matching like Good Till Cancelled (GTC), Post Only, Immediate or Cancel (IOC). +GTC orders are orders that are valid until they are cancelled or filled. Post Only orders are orders that are valid only if they are not +taker orders.
IOC orders are orders that are valid only if they are taker orders. + +In addition, the market also supports trigger conditions for orders. An order with a trigger condition is not put +on the order book until its trigger conditions are met. The following trigger conditions are supported: +TakeProfit(price): If it is a buy order, it is triggered when the market price is greater than or equal to the price. If +it is a sell order, it is triggered when the market price is less than or equal to the price. +StopLoss(price): If it is a buy order, it is triggered when the market price is less than or equal to the price. If it is +a sell order, it is triggered when the market price is greater than or equal to the price. +TimeBased(time): The order is triggered when the current time is greater than or equal to the time. + + +- [Struct `Market`](#0x7_market_Market) +- [Struct `MarketConfig`](#0x7_market_MarketConfig) +- [Struct `OrderEvent`](#0x7_market_OrderEvent) +- [Enum `OrderCancellationReason`](#0x7_market_OrderCancellationReason) +- [Struct `OrderMatchResult`](#0x7_market_OrderMatchResult) +- [Constants](#@Constants_0) +- [Function `good_till_cancelled`](#0x7_market_good_till_cancelled) +- [Function `post_only`](#0x7_market_post_only) +- [Function `immediate_or_cancel`](#0x7_market_immediate_or_cancel) +- [Function `order_status_open`](#0x7_market_order_status_open) +- [Function `order_status_filled`](#0x7_market_order_status_filled) +- [Function `order_status_cancelled`](#0x7_market_order_status_cancelled) +- [Function `order_status_rejected`](#0x7_market_order_status_rejected) +- [Function `destroy_order_match_result`](#0x7_market_destroy_order_match_result) +- [Function `number_of_fills`](#0x7_market_number_of_fills) +- [Function `total_fill_size`](#0x7_market_total_fill_size) +- [Function `get_cancel_reason`](#0x7_market_get_cancel_reason) +- [Function `get_remaining_size_from_result`](#0x7_market_get_remaining_size_from_result) +- [Function `is_ioc_violation`](#0x7_market_is_ioc_violation) +- [Function `is_fill_limit_violation`](#0x7_market_is_fill_limit_violation) +- [Function `get_order_id`](#0x7_market_get_order_id) +- [Function `new_market_config`](#0x7_market_new_market_config) +- [Function `new_market`](#0x7_market_new_market) +- [Function `get_market`](#0x7_market_get_market) +- [Function `get_order_book`](#0x7_market_get_order_book) +- [Function `get_order_book_mut`](#0x7_market_get_order_book_mut) +- [Function `best_bid_price`](#0x7_market_best_bid_price) +- [Function `best_ask_price`](#0x7_market_best_ask_price) +- [Function `is_taker_order`](#0x7_market_is_taker_order) +- [Function `place_order`](#0x7_market_place_order) +- [Function `next_order_id`](#0x7_market_next_order_id) +- [Function `next_fill_id`](#0x7_market_next_fill_id) +- [Function `emit_event_for_order`](#0x7_market_emit_event_for_order) +- [Function `place_order_with_user_addr`](#0x7_market_place_order_with_user_addr) +- [Function `place_maker_order_internal`](#0x7_market_place_maker_order_internal) +- [Function `cancel_maker_order_internal`](#0x7_market_cancel_maker_order_internal) +- [Function `cancel_order_internal`](#0x7_market_cancel_order_internal) +- [Function `place_order_with_order_id`](#0x7_market_place_order_with_order_id) +- [Function `cancel_order`](#0x7_market_cancel_order) +- [Function `decrease_order_size`](#0x7_market_decrease_order_size) +- [Function `get_remaining_size`](#0x7_market_get_remaining_size) +- [Function `take_ready_price_based_orders`](#0x7_market_take_ready_price_based_orders) +- [Function `take_ready_time_based_orders`](#0x7_market_take_ready_time_based_orders) + +
use 0x1::event;
+use 0x1::option;
+use 0x1::signer;
+use 0x1::string;
+use 0x1::vector;
+use 0x7::market_types;
+use 0x7::order_book;
+use 0x7::order_book_types;
+
+ + + + + +## Struct `Market` + + + +
struct Market<M: copy, drop, store> has store
+
+ + + +
+Fields + + +
+
+parent: address +
+
+ Address of the parent object that created this market + Purely for grouping events based on the source DEX, not used otherwise +
+
+market: address +
+
+ Address of the market object of this market. +
+
+last_order_id: u64 +
+
+ +
+
+next_fill_id: u64 +
+
+ +
+
+config: market::MarketConfig +
+
+ +
+
+order_book: order_book::OrderBook<M> +
+
+ +
+
+ + +
+ + + +## Struct `MarketConfig` + + + +
struct MarketConfig has store
+
+ + + +
+Fields + + +
+
+allow_self_trade: bool +
+
+ Whether to allow self-matching orders
+
+allow_events_emission: bool +
+
+ Whether to allow sending all events for the market
+
+ + +
+ + + +## Struct `OrderEvent` + + + +
#[event]
+struct OrderEvent has copy, drop, store
+
+ + + +
+Fields + + +
+
+parent: address +
+
+ +
+
+market: address +
+
+ +
+
+order_id: u64 +
+
+ +
+
+user: address +
+
+ +
+
+orig_size: u64 +
+
+ Original size of the order +
+
+remaining_size: u64 +
+
+ Remaining size of the order in the order book +
+
+size_delta: u64 +
+
+ OPEN - size_delta will be amount of size added + CANCELLED - size_delta will be amount of size removed + FILLED - size_delta will be amount of size filled + REJECTED - size_delta will always be 0 +
+
+price: u64 +
+
+ +
+
+is_buy: bool +
+
+ +
+
+is_taker: bool +
+
+ Whether the order crosses the orderbook. +
+
+status: u8 +
+
+ +
+
+details: string::String +
+
+ +
+
+ + +
+ + + +## Enum `OrderCancellationReason` + + + +
enum OrderCancellationReason has copy, drop
+
+ + + +
+Variants + + +
+PostOnlyViolation + + +
+Fields + + +
+
+ + +
+ +
+ +
+IOCViolation + + +
+Fields + + +
+
+ + +
+ +
+ +
+PositionUpdateViolation + + +
+Fields + + +
+
+ + +
+ +
+ +
+ReduceOnlyViolation + + +
+Fields + + +
+
+ + +
+ +
+ +
+ClearinghouseSettleViolation + + +
+Fields + + +
+
+ + +
+ +
+ +
+MaxFillLimitViolation + + +
+Fields + + +
+
+ + +
+ +
+ +
+ + + +## Struct `OrderMatchResult` + + + +
struct OrderMatchResult has drop
+
+ + + +
+Fields + + +
+
+order_id: u64 +
+
+ +
+
+remaining_size: u64 +
+
+ +
+
+cancel_reason: option::Option<market::OrderCancellationReason> +
+
+ +
+
+fill_sizes: vector<u64> +
+
+ +
+
+ + +
+ + + +## Constants + + + + + + +
const ENOT_ADMIN: u64 = 4;
+
+ + + + + + + +
const EINVALID_FEE_TIER: u64 = 5;
+
+ + + + + + + +
const EINVALID_LIQUIDATION: u64 = 11;
+
+ + + + + + + +
const EINVALID_MATCHING_FOR_MAKER_REINSERT: u64 = 9;
+
+ + + + + + + +
const EINVALID_ORDER: u64 = 1;
+
+ + + + + + + +
const EINVALID_TAKER_POSITION_UPDATE: u64 = 10;
+
+ + + + + + + +
const EINVALID_TIME_IN_FORCE_FOR_MAKER: u64 = 7;
+
+ + + + + + + +
const EINVALID_TIME_IN_FORCE_FOR_TAKER: u64 = 8;
+
+ + + + + + + +
const EMARKET_NOT_FOUND: u64 = 3;
+
+ + + + + + + +
const EORDER_BOOK_FULL: u64 = 2;
+
+ + + + + + + +
const EORDER_DOES_NOT_EXIST: u64 = 6;
+
+ + + + + + + +
const ORDER_SIZE_REDUCED: u8 = 4;
+
+ + + + + +Order has been cancelled by the user or engine. + + +
const ORDER_STATUS_CANCELLED: u8 = 2;
+
+ + + + + +Order has been fully or partially filled. + + +
const ORDER_STATUS_FILLED: u8 = 1;
+
+ + + + + +Order has been accepted by the engine. + + +
const ORDER_STATUS_OPEN: u8 = 0;
+
+ + + + + +Order has been rejected by the engine. Unlike cancelled orders, rejected +orders are invalid orders. Rejection reasons: +1. Insufficient margin +2. Order is reduce_only but does not reduce + + +
const ORDER_STATUS_REJECTED: u8 = 3;
+
+ + + + + +Order time in force +Good till cancelled order type + + +
const TIME_IN_FORCE_GTC: u8 = 0;
+
+ + + + + +Immediate or Cancel order type - ensures that the order is a taker order. Try to match as much of the +order as possible as a taker order and cancel the rest. + +
const TIME_IN_FORCE_IOC: u8 = 2;
+
+ + + + + +Post Only order type - ensures that the order is not a taker order + + +
const TIME_IN_FORCE_POST_ONLY: u8 = 1;
+
+ + + + + +## Function `good_till_cancelled` + + + +
public fun good_till_cancelled(): u8
+
+ + + +
+Implementation + + +
public fun good_till_cancelled(): u8 {
+    TIME_IN_FORCE_GTC
+}
+
+ + + +
+ + + +## Function `post_only` + + + +
public fun post_only(): u8
+
+ + + +
+Implementation + + +
public fun post_only(): u8 {
+    TIME_IN_FORCE_POST_ONLY
+}
+
+ + + +
+ + + +## Function `immediate_or_cancel` + + + +
public fun immediate_or_cancel(): u8
+
+ + + +
+Implementation + + +
public fun immediate_or_cancel(): u8 {
+    TIME_IN_FORCE_IOC
+}
+
+ + + +
+ + + +## Function `order_status_open` + + + +
public fun order_status_open(): u8
+
+ + + +
+Implementation + + +
public fun order_status_open(): u8 {
+    ORDER_STATUS_OPEN
+}
+
+ + + +
+ + + +## Function `order_status_filled` + + + +
public fun order_status_filled(): u8
+
+ + + +
+Implementation + + +
public fun order_status_filled(): u8 {
+    ORDER_STATUS_FILLED
+}
+
+ + + +
+ + + +## Function `order_status_cancelled` + + + +
public fun order_status_cancelled(): u8
+
+ + + +
+Implementation + + +
public fun order_status_cancelled(): u8 {
+    ORDER_STATUS_CANCELLED
+}
+
+ + + +
+ + + +## Function `order_status_rejected` + + + +
public fun order_status_rejected(): u8
+
+ + + +
+Implementation + + +
public fun order_status_rejected(): u8 {
+    ORDER_STATUS_REJECTED
+}
+
+ + + +
+ + + +## Function `destroy_order_match_result` + + + +
public fun destroy_order_match_result(self: market::OrderMatchResult): (u64, u64, option::Option<market::OrderCancellationReason>, vector<u64>)
+
+ + + +
+Implementation + + +
public fun destroy_order_match_result(
+    self: OrderMatchResult
+): (u64, u64, Option<OrderCancellationReason>, vector<u64>) {
+    let OrderMatchResult { order_id, remaining_size, cancel_reason, fill_sizes } =
+        self;
+    (order_id, remaining_size, cancel_reason, fill_sizes)
+}
+
+ + + +
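+
+A hedged sketch of how a caller (the wrapper name and retry policy are illustrative; a `use 0x7::market;`-style import is assumed) might unpack the result of a prior place_order call and decide whether to continue matching the remainder in a follow-up transaction:
+
+fun should_retry(result: market::OrderMatchResult): bool {
+    let (_order_id, remaining_size, cancel_reason, _fill_sizes) =
+        result.destroy_order_match_result();
+    if (cancel_reason.is_none()) {
+        // Fully matched or resting in the book; nothing to retry here.
+        return false
+    };
+    market::is_fill_limit_violation(cancel_reason.destroy_some()) && remaining_size > 0
+}
+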
+ + + +## Function `number_of_fills` + + + +
public fun number_of_fills(self: &market::OrderMatchResult): u64
+
+ + + +
+Implementation + + +
public fun number_of_fills(self: &OrderMatchResult): u64 {
+    self.fill_sizes.length()
+}
+
+ + + +
+ + + +## Function `total_fill_size` + + + +
public fun total_fill_size(self: &market::OrderMatchResult): u64
+
+ + + +
+Implementation + + +
public fun total_fill_size(self: &OrderMatchResult): u64 {
+    self.fill_sizes.fold(0, |acc, fill_size| acc + fill_size)
+}
+
+ + + +
+ + + +## Function `get_cancel_reason` + + + +
public fun get_cancel_reason(self: &market::OrderMatchResult): option::Option<market::OrderCancellationReason>
+
+ + + +
+Implementation + + +
public fun get_cancel_reason(self: &OrderMatchResult): Option<OrderCancellationReason> {
+    self.cancel_reason
+}
+
+ + + +
+ + + +## Function `get_remaining_size_from_result` + + + +
public fun get_remaining_size_from_result(self: &market::OrderMatchResult): u64
+
+ + + +
+Implementation + + +
public fun get_remaining_size_from_result(self: &OrderMatchResult): u64 {
+    self.remaining_size
+}
+
+ + + +
+ + + +## Function `is_ioc_violation` + + + +
public fun is_ioc_violation(self: market::OrderCancellationReason): bool
+
+ + + +
+Implementation + + +
public fun is_ioc_violation(self: OrderCancellationReason): bool {
+    return self == OrderCancellationReason::IOCViolation
+}
+
+ + + +
+ + + +## Function `is_fill_limit_violation` + + + +
public fun is_fill_limit_violation(cancel_reason: market::OrderCancellationReason): bool
+
+ + + +
+Implementation + + +
public fun is_fill_limit_violation(
+    cancel_reason: OrderCancellationReason
+): bool {
+    return cancel_reason == OrderCancellationReason::MaxFillLimitViolation
+}
+
+ + + +
+ + + +## Function `get_order_id` + + + +
public fun get_order_id(self: market::OrderMatchResult): u64
+
+ + + +
+Implementation + + +
public fun get_order_id(self: OrderMatchResult): u64 {
+    self.order_id
+}
+
+ + + +
+ + + +## Function `new_market_config` + + + +
public fun new_market_config(allow_self_matching: bool, allow_events_emission: bool): market::MarketConfig
+
+ + + +
+Implementation + + +
public fun new_market_config(
+    allow_self_matching: bool, allow_events_emission: bool
+): MarketConfig {
+    MarketConfig { allow_self_trade: allow_self_matching, allow_events_emission: allow_events_emission }
+}
+
+ + + +
+ + + +## Function `new_market` + + + +
public fun new_market<M: copy, drop, store>(parent: &signer, market: &signer, config: market::MarketConfig): market::Market<M>
+
+ + + +
+Implementation + + +
public fun new_market<M: store + copy + drop>(
+    parent: &signer, market: &signer, config: MarketConfig
+): Market<M> {
+    // requiring signers, and not addresses, purely to guarantee different dexes
+    // cannot pollute each other's events, accidentally or maliciously.
+    Market {
+        parent: signer::address_of(parent),
+        market: signer::address_of(market),
+        last_order_id: 0,
+        next_fill_id: 0,
+        config,
+        order_book: new_order_book()
+    }
+}
+
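+A minimal wiring sketch, assuming a hypothetical metadata type `TestMeta` and two `&signer` values
+`parent_signer` and `market_signer` supplied by the embedding dex module:
+
+```move
+// Sketch only: a dex module creating its market during initialization.
+let config = new_market_config(
+    /*allow_self_matching=*/ false,
+    /*allow_events_emission=*/ true
+);
+let market = new_market<TestMeta>(parent_signer, market_signer, config);
+```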
+ + + +
+ + + +## Function `get_market` + + + +
public fun get_market<M: copy, drop, store>(self: &market::Market<M>): address
+
+ + + +
+Implementation + + +
public fun get_market<M: store + copy + drop>(self: &Market<M>): address {
+    self.market
+}
+
+ + + +
+ + + +## Function `get_order_book` + + + +
public fun get_order_book<M: copy, drop, store>(self: &market::Market<M>): &order_book::OrderBook<M>
+
+ + + +
+Implementation + + +
public fun get_order_book<M: store + copy + drop>(self: &Market<M>): &OrderBook<M> {
+    &self.order_book
+}
+
+ + + +
+ + + +## Function `get_order_book_mut` + + + +
public fun get_order_book_mut<M: copy, drop, store>(self: &mut market::Market<M>): &mut order_book::OrderBook<M>
+
+ + + +
+Implementation + + +
public fun get_order_book_mut<M: store + copy + drop>(
+    self: &mut Market<M>
+): &mut OrderBook<M> {
+    &mut self.order_book
+}
+
+ + + +
+ + + +## Function `best_bid_price` + + + +
public fun best_bid_price<M: copy, drop, store>(self: &market::Market<M>): option::Option<u64>
+
+ + + +
+Implementation + + +
public fun best_bid_price<M: store + copy + drop>(self: &Market<M>): Option<u64> {
+    self.order_book.best_bid_price()
+}
+
+ + + +
+ + + +## Function `best_ask_price` + + + +
public fun best_ask_price<M: copy, drop, store>(self: &market::Market<M>): option::Option<u64>
+
+ + + +
+Implementation + + +
public fun best_ask_price<M: store + copy + drop>(self: &Market<M>): Option<u64> {
+    self.order_book.best_ask_price()
+}
+
+ + + +
+ + + +## Function `is_taker_order` + + + +
public fun is_taker_order<M: copy, drop, store>(self: &market::Market<M>, price: u64, is_buy: bool, trigger_condition: option::Option<order_book_types::TriggerCondition>): bool
+
+ + + +
+Implementation + + +
public fun is_taker_order<M: store + copy + drop>(
+    self: &Market<M>,
+    price: u64,
+    is_buy: bool,
+    trigger_condition: Option<TriggerCondition>
+): bool {
+    self.order_book.is_taker_order(price, is_buy, trigger_condition)
+}
+
+ + + +
+ + + +## Function `place_order` + +Places an order - if it's a taker order, it will be matched immediately; if it's a maker order, it will simply +be placed in the order book. An order id is generated when the order is placed, and this id can be used to +uniquely identify the order for this market, to get the status of the order, or to cancel the order. +The order is placed with the following parameters: +- user: The user who is placing the order +- price: The price at which the order is placed +- orig_size: The original size of the order +- is_bid: Whether the order is a buy order or a sell order +- time_in_force: The time in force for the order. This can be one of the following: +- TIME_IN_FORCE_GTC: Good till cancelled order type +- TIME_IN_FORCE_POST_ONLY: Post Only order type - ensures that the order is not a taker order +- TIME_IN_FORCE_IOC: Immediate or Cancel order type - ensures that the order is a taker order. Match as much of the +order as possible as a taker order and cancel the rest. +- trigger_condition: The trigger condition +- metadata: The metadata for the order. This can be any type that the clearing house implementation supports. +- max_fill_limit: The maximum fill limit for the order. This is the maximum number of fills to trigger for this order. +This knob is present to bound the amount of gas any order placement transaction might consume and avoid +hitting the maximum gas limit of the blockchain. +- emit_cancel_on_fill_limit: Whether to emit an order cancellation event when the fill limit is reached. +This is useful as the caller might not want to cancel the order when the limit is reached and can continue +that order in a separate transaction. +- callbacks: The callbacks for the market clearinghouse. This is a struct that implements the MarketClearinghouseCallbacks +interface. It is used to validate the order and settle the trade. +Returns the order id, remaining size, cancel reason and number of fills for the order. +A usage sketch follows the implementation below. + + +
public fun place_order<M: copy, drop, store>(self: &mut market::Market<M>, user: &signer, price: u64, orig_size: u64, is_bid: bool, time_in_force: u8, trigger_condition: option::Option<order_book_types::TriggerCondition>, metadata: M, max_fill_limit: u64, emit_cancel_on_fill_limit: bool, callbacks: &market_types::MarketClearinghouseCallbacks<M>): market::OrderMatchResult
+
+ + + +
+Implementation + + +
public fun place_order<M: store + copy + drop>(
+    self: &mut Market<M>,
+    user: &signer,
+    price: u64,
+    orig_size: u64,
+    is_bid: bool,
+    time_in_force: u8,
+    trigger_condition: Option<TriggerCondition>,
+    metadata: M,
+    max_fill_limit: u64,
+    emit_cancel_on_fill_limit: bool,
+    callbacks: &MarketClearinghouseCallbacks<M>
+): OrderMatchResult {
+    let order_id = self.next_order_id();
+    self.place_order_with_order_id(
+        signer::address_of(user),
+        price,
+        orig_size,
+        orig_size,
+        is_bid,
+        time_in_force,
+        trigger_condition,
+        metadata,
+        order_id,
+        max_fill_limit,
+        emit_cancel_on_fill_limit,
+        true,
+        callbacks
+    )
+}
+
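+A minimal usage sketch, assuming the `market`, `callbacks` and `TestMeta` values from the surrounding sketches
+(callbacks built via market_types::new_market_clearinghouse_callbacks) and a `user: &signer`:
+
+```move
+// Sketch only: place a GTC bid of size 10 at price 100 and unpack the result.
+let result = market.place_order(
+    user,                   // &signer placing the order
+    100,                    // price
+    10,                     // orig_size
+    true,                   // is_bid
+    good_till_cancelled(),  // time_in_force
+    option::none(),         // trigger_condition
+    TestMeta {},            // metadata
+    1000,                   // max_fill_limit
+    false,                  // emit_cancel_on_fill_limit
+    &callbacks
+);
+let (order_id, remaining_size, cancel_reason, fill_sizes) =
+    result.destroy_order_match_result();
+```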
+ + + +
+ + + +## Function `next_order_id` + + + +
public fun next_order_id<M: copy, drop, store>(self: &mut market::Market<M>): u64
+
+ + + +
+Implementation + + +
public fun next_order_id<M: store + copy + drop>(self: &mut Market<M>): u64 {
+    self.last_order_id += 1;
+    self.last_order_id
+}
+
+ + + +
+ + + +## Function `next_fill_id` + + + +
fun next_fill_id<M: copy, drop, store>(self: &mut market::Market<M>): u64
+
+ + + +
+Implementation + + +
fun next_fill_id<M: store + copy + drop>(self: &mut Market<M>): u64 {
+    let next_fill_id = self.next_fill_id;
+    self.next_fill_id += 1;
+    next_fill_id
+}
+
+ + + +
+ + + +## Function `emit_event_for_order` + + + +
fun emit_event_for_order<M: copy, drop, store>(self: &market::Market<M>, order_id: u64, user: address, orig_size: u64, remaining_size: u64, size_delta: u64, price: u64, is_bid: bool, is_taker: bool, status: u8, details: &string::String)
+
+ + + +
+Implementation + + +
fun emit_event_for_order<M: store + copy + drop>(
+    self: &Market<M>,
+    order_id: u64,
+    user: address,
+    orig_size: u64,
+    remaining_size: u64,
+    size_delta: u64,
+    price: u64,
+    is_bid: bool,
+    is_taker: bool,
+    status: u8,
+    details: &String
+) {
+    // Final check whether event sending is enabled
+    if (self.config.allow_events_emission) {
+        event::emit(
+            OrderEvent {
+                parent: self.parent,
+                market: self.market,
+                order_id,
+                user,
+                orig_size,
+                remaining_size,
+                size_delta,
+                price,
+                is_buy: is_bid,
+                is_taker,
+                status,
+                details: *details
+            }
+        );
+    };
+}
+
+ + + +
+ + + +## Function `place_order_with_user_addr` + +Similar to the place_order API, but it takes a user address instead of a signer - this can be used when trading +functionality is delegated to a different address. Note that it is the responsibility of the caller +to verify that the transaction signer is authorized to place orders on behalf of the user. + + +
public fun place_order_with_user_addr<M: copy, drop, store>(self: &mut market::Market<M>, user_addr: address, price: u64, orig_size: u64, is_bid: bool, time_in_force: u8, trigger_condition: option::Option<order_book_types::TriggerCondition>, metadata: M, max_fill_limit: u64, emit_cancel_on_fill_limit: bool, callbacks: &market_types::MarketClearinghouseCallbacks<M>): market::OrderMatchResult
+
+ + + +
+Implementation + + +
public fun place_order_with_user_addr<M: store + copy + drop>(
+    self: &mut Market<M>,
+    user_addr: address,
+    price: u64,
+    orig_size: u64,
+    is_bid: bool,
+    time_in_force: u8,
+    trigger_condition: Option<TriggerCondition>,
+    metadata: M,
+    max_fill_limit: u64,
+    emit_cancel_on_fill_limit: bool,
+    callbacks: &MarketClearinghouseCallbacks<M>
+): OrderMatchResult {
+    let order_id = self.next_order_id();
+    self.place_order_with_order_id(
+        user_addr,
+        price,
+        orig_size,
+        orig_size,
+        is_bid,
+        time_in_force,
+        trigger_condition,
+        metadata,
+        order_id,
+        max_fill_limit,
+        emit_cancel_on_fill_limit,
+        true,
+        callbacks
+    )
+}
+
+ + + +
+ + + +## Function `place_maker_order_internal` + + + +
fun place_maker_order_internal<M: copy, drop, store>(self: &mut market::Market<M>, user_addr: address, price: u64, orig_size: u64, remaining_size: u64, fill_sizes: vector<u64>, is_bid: bool, time_in_force: u8, trigger_condition: option::Option<order_book_types::TriggerCondition>, metadata: M, order_id: u64, emit_order_open: bool, callbacks: &market_types::MarketClearinghouseCallbacks<M>): market::OrderMatchResult
+
+ + + +
+Implementation + + +
fun place_maker_order_internal<M: store + copy + drop>(
+    self: &mut Market<M>,
+    user_addr: address,
+    price: u64,
+    orig_size: u64,
+    remaining_size: u64,
+    fill_sizes: vector<u64>,
+    is_bid: bool,
+    time_in_force: u8,
+    trigger_condition: Option<TriggerCondition>,
+    metadata: M,
+    order_id: u64,
+    emit_order_open: bool,
+    callbacks: &MarketClearinghouseCallbacks<M>
+): OrderMatchResult {
+    // Validate that the order is valid from position management perspective
+    if (time_in_force == TIME_IN_FORCE_IOC) {
+        return self.cancel_order_internal(
+            user_addr,
+            price,
+            order_id,
+            orig_size,
+            remaining_size,
+            fill_sizes,
+            is_bid,
+            false, // is_taker
+            OrderCancellationReason::IOCViolation,
+            std::string::utf8(b"IOC Violation"),
+            callbacks
+        );
+    };
+
+    if (emit_order_open) {
+        emit_event_for_order(
+            self,
+            order_id,
+            user_addr,
+            orig_size,
+            remaining_size,
+            orig_size,
+            price,
+            is_bid,
+            false, // is_taker
+            ORDER_STATUS_OPEN,
+            &std::string::utf8(b"")
+        );
+    };
+
+    callbacks.place_maker_order(
+        user_addr, order_id, is_bid, price, remaining_size, metadata
+    );
+    self.order_book.place_maker_order(
+        new_order_request(
+            user_addr,
+            order_id,
+            option::none(),
+            price,
+            orig_size,
+            remaining_size,
+            is_bid,
+            trigger_condition,
+            metadata
+        )
+    );
+    return OrderMatchResult {
+        order_id,
+        remaining_size,
+        cancel_reason: option::none(),
+        fill_sizes
+    }
+}
+
+ + + +
+ + + +## Function `cancel_maker_order_internal` + + + +
fun cancel_maker_order_internal<M: copy, drop, store>(self: &mut market::Market<M>, maker_order: &order_book_types::Order<M>, order_id: u64, maker_address: address, maker_cancellation_reason: string::String, unsettled_size: u64, callbacks: &market_types::MarketClearinghouseCallbacks<M>)
+
+ + + +
+Implementation + + +
fun cancel_maker_order_internal<M: store + copy + drop>(
+    self: &mut Market<M>,
+    maker_order: &Order<M>,
+    order_id: u64,
+    maker_address: address,
+    maker_cancellation_reason: String,
+    unsettled_size: u64,
+    callbacks: &MarketClearinghouseCallbacks<M>
+) {
+    let maker_cancel_size = unsettled_size + maker_order.get_remaining_size();
+
+    emit_event_for_order(
+        self,
+        order_id,
+        maker_address,
+        maker_order.get_orig_size(),
+        0,
+        maker_cancel_size,
+        maker_order.get_price(),
+        maker_order.is_bid(),
+        false,
+        ORDER_STATUS_CANCELLED,
+        &maker_cancellation_reason
+    );
+    // If the maker is invalid cancel the maker order and continue to the next maker order
+    if (maker_order.get_remaining_size() != 0) {
+        self.order_book.cancel_order(maker_address, order_id);
+    };
+    callbacks.cleanup_order(
+        maker_address, order_id, maker_order.is_bid(), maker_cancel_size
+    );
+}
+
+ + + +
+ + + +## Function `cancel_order_internal` + + + +
fun cancel_order_internal<M: copy, drop, store>(self: &mut market::Market<M>, user_addr: address, price: u64, order_id: u64, orig_size: u64, size_delta: u64, fill_sizes: vector<u64>, is_bid: bool, is_taker: bool, cancel_reason: market::OrderCancellationReason, cancel_details: string::String, callbacks: &market_types::MarketClearinghouseCallbacks<M>): market::OrderMatchResult
+
+ + + +
+Implementation + + +
fun cancel_order_internal<M: store + copy + drop>(
+    self: &mut Market<M>,
+    user_addr: address,
+    price: u64,
+    order_id: u64,
+    orig_size: u64,
+    size_delta: u64,
+    fill_sizes: vector<u64>,
+    is_bid: bool,
+    is_taker: bool,
+    cancel_reason: OrderCancellationReason,
+    cancel_details: String,
+    callbacks: &MarketClearinghouseCallbacks<M>
+): OrderMatchResult {
+    emit_event_for_order(
+        self,
+        order_id,
+        user_addr,
+        orig_size,
+        0, // remaining size
+        size_delta,
+        price,
+        is_bid,
+        is_taker,
+        ORDER_STATUS_CANCELLED,
+        &cancel_details
+    );
+    callbacks.cleanup_order(
+        user_addr, order_id, is_bid, size_delta
+    );
+    return OrderMatchResult {
+        order_id,
+        remaining_size: 0,
+        cancel_reason: option::some(cancel_reason),
+        fill_sizes
+    }
+}
+
+ + + +
+ + + +## Function `place_order_with_order_id` + +Similar to the place_order API but allows a few extra parameters: +- order_id: The order id for the order - this is needed because, for orders with trigger conditions, the order +id is generated when the order is first placed, and the same order id is reused to match the order once it is triggered. +- emit_taker_order_open: Whether to emit an order open event for the taker order - this is used when +the caller does not want to emit an open order event for a taker, for example when the taker order was interrupted by +a fill limit violation in a previous transaction and this order is just a continuation of that order +(see the continuation sketch after the implementation below). + + +
public fun place_order_with_order_id<M: copy, drop, store>(self: &mut market::Market<M>, user_addr: address, price: u64, orig_size: u64, remaining_size: u64, is_bid: bool, time_in_force: u8, trigger_condition: option::Option<order_book_types::TriggerCondition>, metadata: M, order_id: u64, max_fill_limit: u64, cancel_on_fill_limit: bool, emit_taker_order_open: bool, callbacks: &market_types::MarketClearinghouseCallbacks<M>): market::OrderMatchResult
+
+ + + +
+Implementation + + +
public fun place_order_with_order_id<M: store + copy + drop>(
+    self: &mut Market<M>,
+    user_addr: address,
+    price: u64,
+    orig_size: u64,
+    remaining_size: u64,
+    is_bid: bool,
+    time_in_force: u8,
+    trigger_condition: Option<TriggerCondition>,
+    metadata: M,
+    order_id: u64,
+    max_fill_limit: u64,
+    cancel_on_fill_limit: bool,
+    emit_taker_order_open: bool,
+    callbacks: &MarketClearinghouseCallbacks<M>
+): OrderMatchResult {
+    assert!(
+        orig_size > 0 && remaining_size > 0,
+        EINVALID_ORDER
+    );
+    // TODO(skedia) is_taker_order API can actually return false positives as the maker orders might not be valid.
+    // Changes are needed to ensure the maker order is valid for this order to be a valid taker order.
+    // TODO(skedia) reconcile the semantics around global order id vs account local id.
+    if (
+        !callbacks.validate_order_placement(
+            user_addr,
+            order_id,
+            true, // is_taker
+            is_bid,
+            price,
+            remaining_size,
+            metadata
+        )) {
+        return self.cancel_order_internal(
+            user_addr,
+            price,
+            order_id,
+            orig_size,
+            0, // 0 because order was never placed
+            vector[],
+            is_bid,
+            true, // is_taker
+            OrderCancellationReason::PositionUpdateViolation,
+            std::string::utf8(b"Position Update violation"),
+            callbacks
+        );
+    };
+
+    let is_taker_order =
+        self.order_book.is_taker_order(price, is_bid, trigger_condition);
+    if (emit_taker_order_open) {
+        emit_event_for_order(
+            self,
+            order_id,
+            user_addr,
+            orig_size,
+            remaining_size,
+            orig_size,
+            price,
+            is_bid,
+            is_taker_order,
+            ORDER_STATUS_OPEN,
+            &std::string::utf8(b"")
+        );
+    };
+    if (!is_taker_order) {
+        return self.place_maker_order_internal(
+            user_addr,
+            price,
+            orig_size,
+            remaining_size,
+            vector[],
+            is_bid,
+            time_in_force,
+            trigger_condition,
+            metadata,
+            order_id,
+            false,
+            callbacks
+        );
+    };
+
+    // NOTE: We should always use is_taker: true for this order past this
+    // point so that indexer can consistently track the order's status
+    if (time_in_force == TIME_IN_FORCE_POST_ONLY) {
+        return self.cancel_order_internal(
+            user_addr,
+            price,
+            order_id,
+            orig_size,
+            remaining_size,
+            vector[],
+            is_bid,
+            true, // is_taker
+            OrderCancellationReason::PostOnlyViolation,
+            std::string::utf8(b"Post Only violation"),
+            callbacks
+        );
+    };
+    let fill_sizes = vector::empty();
+    loop {
+        let result =
+            self.order_book.get_single_match_for_taker(price, remaining_size, is_bid);
+        let (maker_order, maker_matched_size) = result.destroy_single_order_match();
+        let (maker_address, maker_order_id) =
+            maker_order.get_order_id().destroy_order_id_type();
+        if (!self.config.allow_self_trade && maker_address == user_addr) {
+            self.cancel_maker_order_internal(
+                &maker_order,
+                maker_order_id,
+                maker_address,
+                std::string::utf8(b"Disallowed self trading"),
+                maker_matched_size,
+                callbacks
+            );
+            continue;
+        };
+
+        let fill_id = self.next_fill_id();
+
+        let settle_result =
+            callbacks.settle_trade(
+                user_addr,
+                maker_address,
+                order_id,
+                maker_order_id,
+                fill_id,
+                is_bid,
+                maker_order.get_price(), // Order is always matched at the price of the maker
+                maker_matched_size,
+                metadata,
+                maker_order.get_metadata_from_order()
+            );
+
+        let unsettled_maker_size = maker_matched_size;
+        let settled_size = settle_result.get_settled_size();
+        if (settled_size > 0) {
+            remaining_size -= settled_size;
+            unsettled_maker_size -= settled_size;
+            fill_sizes.push_back(settled_size);
+            // Event for taker fill
+            emit_event_for_order(
+                self,
+                order_id,
+                user_addr,
+                orig_size,
+                remaining_size,
+                settled_size,
+                maker_order.get_price(),
+                is_bid,
+                true, // is_taker
+                ORDER_STATUS_FILLED,
+                &std::string::utf8(b"")
+            );
+            // Event for maker fill
+            emit_event_for_order(
+                self,
+                maker_order_id,
+                maker_address,
+                maker_order.get_orig_size(),
+                maker_order.get_remaining_size() + unsettled_maker_size,
+                settled_size,
+                maker_order.get_price(),
+                !is_bid,
+                false, // is_taker
+                ORDER_STATUS_FILLED,
+                &std::string::utf8(b"")
+            );
+        };
+
+        let maker_cancellation_reason = settle_result.get_maker_cancellation_reason();
+        if (maker_cancellation_reason.is_some()) {
+            self.cancel_maker_order_internal(
+                &maker_order,
+                maker_order_id,
+                maker_address,
+                maker_cancellation_reason.destroy_some(),
+                unsettled_maker_size,
+                callbacks
+            );
+        };
+
+        let taker_cancellation_reason = settle_result.get_taker_cancellation_reason();
+        if (taker_cancellation_reason.is_some()) {
+            let result =
+                self.cancel_order_internal(
+                    user_addr,
+                    price,
+                    order_id,
+                    orig_size,
+                    remaining_size,
+                    fill_sizes,
+                    is_bid,
+                    true, // is_taker
+                    OrderCancellationReason::ClearinghouseSettleViolation,
+                    taker_cancellation_reason.destroy_some(),
+                    callbacks
+                );
+            if (maker_cancellation_reason.is_none() && unsettled_maker_size > 0) {
+                // If the taker is cancelled but the maker is not cancelled, then we need to re-insert
+                // the maker order back into the order book
+                self.order_book.reinsert_maker_order(
+                    new_order_request(
+                        maker_address,
+                        maker_order_id,
+                        option::some(maker_order.get_unique_priority_idx()),
+                        maker_order.get_price(),
+                        maker_order.get_orig_size(),
+                        unsettled_maker_size,
+                        !is_bid,
+                        option::none(),
+                        maker_order.get_metadata_from_order()
+                    )
+                );
+            };
+            return result;
+        };
+
+        if (maker_order.get_remaining_size() == 0) {
+            callbacks.cleanup_order(
+                maker_address,
+                maker_order_id,
+                !is_bid, // is_bid is inverted for maker orders
+                0 // 0 because the order is fully filled
+            );
+        };
+        if (remaining_size == 0) {
+            callbacks.cleanup_order(
+                user_addr, order_id, is_bid, 0 // 0 because the order is fully filled
+            );
+            break;
+        };
+
+        // Check if the next iteration will still match
+        let is_taker_order =
+            self.order_book.is_taker_order(price, is_bid, option::none());
+        if (!is_taker_order) {
+            if (time_in_force == TIME_IN_FORCE_IOC) {
+                return self.cancel_order_internal(
+                    user_addr,
+                    price,
+                    order_id,
+                    orig_size,
+                    remaining_size,
+                    fill_sizes,
+                    is_bid,
+                    true, // is_taker
+                    OrderCancellationReason::IOCViolation,
+                    std::string::utf8(b"IOC_VIOLATION"),
+                    callbacks
+                );
+            } else {
+                // If the order is not a taker order, then we can place it as a maker order
+                return self.place_maker_order_internal(
+                    user_addr,
+                    price,
+                    orig_size,
+                    remaining_size,
+                    fill_sizes,
+                    is_bid,
+                    time_in_force,
+                    trigger_condition,
+                    metadata,
+                    order_id,
+                    true, // emit_order_open
+                    callbacks
+                );
+            };
+        };
+
+        if (fill_sizes.length() >= max_fill_limit) {
+            if (cancel_on_fill_limit) {
+                return self.cancel_order_internal(
+                    user_addr,
+                    price,
+                    order_id,
+                    orig_size,
+                    remaining_size,
+                    fill_sizes,
+                    is_bid,
+                    true, // is_taker
+                    OrderCancellationReason::MaxFillLimitViolation,
+                    std::string::utf8(b"Max fill limit reached"),
+                    callbacks
+                );
+            } else {
+                return OrderMatchResult {
+                    order_id,
+                    remaining_size,
+                    cancel_reason: option::some(
+                        OrderCancellationReason::MaxFillLimitViolation
+                    ),
+                    fill_sizes
+                }
+            };
+        };
+    };
+    OrderMatchResult {
+        order_id,
+        remaining_size,
+        cancel_reason: option::none(),
+        fill_sizes
+    }
+}
+
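+A continuation sketch, assuming the `market`, `callbacks`, `TestMeta` and `user` values from the earlier
+sketches: when a previous transaction stopped at the fill limit, the same order id is re-submitted without
+re-emitting an open event.
+
+```move
+// Sketch only: continue an IOC order that previously hit the fill limit.
+let result = market.place_order(
+    user, 100, 1000, true, immediate_or_cancel(), option::none(),
+    TestMeta {}, 50, /*emit_cancel_on_fill_limit=*/ false, &callbacks
+);
+let (order_id, remaining_size, cancel_reason, _fill_sizes) =
+    result.destroy_order_match_result();
+if (cancel_reason.is_some()
+    && is_fill_limit_violation(cancel_reason.destroy_some())) {
+    // In a follow-up transaction: same order id, no new open event.
+    market.place_order_with_order_id(
+        signer::address_of(user), 100, 1000, remaining_size, true,
+        immediate_or_cancel(), option::none(), TestMeta {}, order_id,
+        50, /*cancel_on_fill_limit=*/ true, /*emit_taker_order_open=*/ false,
+        &callbacks
+    );
+};
+```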
+ + + +
+ + + +## Function `cancel_order` + +Cancels an order and emits a cancellation event for it. + + +
public fun cancel_order<M: copy, drop, store>(self: &mut market::Market<M>, user: &signer, order_id: u64, callbacks: &market_types::MarketClearinghouseCallbacks<M>)
+
+ + + +
+Implementation + + +
public fun cancel_order<M: store + copy + drop>(
+    self: &mut Market<M>,
+    user: &signer,
+    order_id: u64,
+    callbacks: &MarketClearinghouseCallbacks<M>
+) {
+    let account = signer::address_of(user);
+    let maybe_order = self.order_book.cancel_order(account, order_id);
+    if (maybe_order.is_some()) {
+        let order = maybe_order.destroy_some();
+        let (
+            order_id_type,
+            _unique_priority_idx,
+            price,
+            orig_size,
+            remaining_size,
+            is_bid,
+            _trigger_condition,
+            _metadata
+        ) = order.destroy_order();
+        callbacks.cleanup_order(
+            account, order_id, is_bid, remaining_size
+        );
+        let (user, order_id) = order_id_type.destroy_order_id_type();
+        emit_event_for_order(
+            self,
+            order_id,
+            user,
+            orig_size,
+            remaining_size,
+            remaining_size,
+            price,
+            is_bid,
+            false, // is_taker
+            ORDER_STATUS_CANCELLED,
+            &std::string::utf8(b"Order cancelled")
+        );
+    }
+}
+
+ + + +
+ + + +## Function `decrease_order_size` + +Decreases the size of an order - this reduces the remaining size of the order in the order book and emits an event +for the size reduction. + + +
public fun decrease_order_size<M: copy, drop, store>(self: &mut market::Market<M>, user: &signer, order_id: u64, size_delta: u64, callbacks: &market_types::MarketClearinghouseCallbacks<M>)
+
+ + + +
+Implementation + + +
public fun decrease_order_size<M: store + copy + drop>(
+    self: &mut Market<M>,
+    user: &signer,
+    order_id: u64,
+    size_delta: u64,
+    callbacks: &MarketClearinghouseCallbacks<M>
+) {
+    let account = signer::address_of(user);
+    self.order_book.decrease_order_size(account, order_id, size_delta);
+    let maybe_order = self.order_book.get_order(account, order_id);
+    assert!(maybe_order.is_some(), EORDER_DOES_NOT_EXIST);
+    let (order, _) = maybe_order.destroy_some().destroy_order_from_state();
+    let (
+        order_id_type,
+        _unique_priority_idx,
+        price,
+        orig_size,
+        remaining_size,
+        is_bid,
+        _trigger_condition,
+        _metadata
+    ) = order.destroy_order();
+    let (user, order_id) = order_id_type.destroy_order_id_type();
+    callbacks.decrease_order_size(
+        user, order_id, is_bid, price, remaining_size
+    );
+
+    emit_event_for_order(
+        self,
+        order_id,
+        user,
+        orig_size,
+        remaining_size,
+        size_delta,
+        price,
+        is_bid,
+        false, // is_taker
+        ORDER_SIZE_REDUCED,
+        &std::string::utf8(b"Order size reduced")
+    );
+}
+
+ + + +
+ + + +## Function `get_remaining_size` + +Remaining size of the order in the order book. + + +
public fun get_remaining_size<M: copy, drop, store>(self: &market::Market<M>, user: address, order_id: u64): u64
+
+ + + +
+Implementation + + +
public fun get_remaining_size<M: store + copy + drop>(
+    self: &Market<M>, user: address, order_id: u64
+): u64 {
+    self.order_book.get_remaining_size(user, order_id)
+}
+
+ + + +
+ + + +## Function `take_ready_price_based_orders` + +Returns all the pending orders that are ready to be executed based on the oracle price. The caller is responsible for +calling the place_order_with_order_id API to re-place each order with the order id returned from this API +(see the sketch after the implementation below). + + +
public fun take_ready_price_based_orders<M: copy, drop, store>(self: &mut market::Market<M>, oracle_price: u64): vector<order_book_types::Order<M>>
+
+ + + +
+Implementation + + +
public fun take_ready_price_based_orders<M: store + copy + drop>(
+    self: &mut Market<M>, oracle_price: u64
+): vector<Order<M>> {
+    self.order_book.take_ready_price_based_orders(oracle_price)
+}
+
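+A re-trigger sketch, assuming `market`, `callbacks` and `TestMeta` from the earlier sketches and an
+`oracle_price: u64` supplied by the caller:
+
+```move
+// Sketch only: drain the orders whose price trigger is met and re-place them.
+let ready = market.take_ready_price_based_orders(oracle_price);
+while (!ready.is_empty()) {
+    let order = ready.pop_back();
+    let (order_id_type, _idx, price, orig_size, remaining_size, is_bid, _cond, metadata) =
+        order.destroy_order();
+    let (user_addr, order_id) = order_id_type.destroy_order_id_type();
+    market.place_order_with_order_id(
+        user_addr, price, orig_size, remaining_size, is_bid,
+        good_till_cancelled(), option::none(), metadata, order_id,
+        /*max_fill_limit=*/ 1000, /*cancel_on_fill_limit=*/ true,
+        /*emit_taker_order_open=*/ true, &callbacks
+    );
+};
+```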
+ + + +
+ + + +## Function `take_ready_time_based_orders` + +Returns all the pending orders that are ready to be executed based on the current timestamp. The caller is responsible for +calling the place_order_with_order_id API to re-place each order with the order id returned from this API. + + +
public fun take_ready_time_based_orders<M: copy, drop, store>(self: &mut market::Market<M>): vector<order_book_types::Order<M>>
+
+ + + +
+Implementation + + +
public fun take_ready_time_based_orders<M: store + copy + drop>(
+    self: &mut Market<M>
+): vector<Order<M>> {
+    self.order_book.take_ready_time_based_orders()
+}
+
+ + + +
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/aptos-experimental/doc/market_types.md b/aptos-move/framework/aptos-experimental/doc/market_types.md new file mode 100644 index 0000000000000..5febf56cc442d --- /dev/null +++ b/aptos-move/framework/aptos-experimental/doc/market_types.md @@ -0,0 +1,442 @@ + + + +# Module `0x7::market_types` + + + +- [Struct `SettleTradeResult`](#0x7_market_types_SettleTradeResult) +- [Struct `MarketClearinghouseCallbacks`](#0x7_market_types_MarketClearinghouseCallbacks) +- [Constants](#@Constants_0) +- [Function `new_settle_trade_result`](#0x7_market_types_new_settle_trade_result) +- [Function `new_market_clearinghouse_callbacks`](#0x7_market_types_new_market_clearinghouse_callbacks) +- [Function `get_settled_size`](#0x7_market_types_get_settled_size) +- [Function `get_maker_cancellation_reason`](#0x7_market_types_get_maker_cancellation_reason) +- [Function `get_taker_cancellation_reason`](#0x7_market_types_get_taker_cancellation_reason) +- [Function `settle_trade`](#0x7_market_types_settle_trade) +- [Function `validate_order_placement`](#0x7_market_types_validate_order_placement) +- [Function `place_maker_order`](#0x7_market_types_place_maker_order) +- [Function `cleanup_order`](#0x7_market_types_cleanup_order) +- [Function `decrease_order_size`](#0x7_market_types_decrease_order_size) + + +
use 0x1::option;
+use 0x1::string;
+
+ + + + + +## Struct `SettleTradeResult` + + + +
struct SettleTradeResult has drop
+
+ + + +
+Fields + + +
+
+settled_size: u64 +
+
+ +
+
+maker_cancellation_reason: option::Option<string::String> +
+
+ +
+
+taker_cancellation_reason: option::Option<string::String> +
+
+ +
+
+ + +
+ + + +## Struct `MarketClearinghouseCallbacks` + + + +
struct MarketClearinghouseCallbacks<M: copy, drop, store> has drop
+
+ + + +
+Fields + + +
+
+settle_trade_f: |(address, address, u64, u64, u64, bool, u64, u64, M, M)|market_types::SettleTradeResult has copy + drop +
+
+ +
+
+validate_order_placement_f: |(address, u64, bool, bool, u64, u64, M)|bool has copy + drop +
+
+ +
+
+place_maker_order_f: |(address, u64, bool, u64, u64, M)| has copy + drop +
+
+ +
+
+cleanup_order_f: |(address, u64, bool, u64)| has copy + drop +
+
+ +
+
+decrease_order_size_f: |(address, u64, bool, u64, u64)| has copy + drop +
+
+ +
+
+ + +
+ + + +## Constants + + + + + + +
const EINVALID_ADDRESS: u64 = 1;
+
+ + + + + + + +
const EINVALID_SETTLE_RESULT: u64 = 2;
+
+ + + + + +## Function `new_settle_trade_result` + + + +
public fun new_settle_trade_result(settled_size: u64, maker_cancellation_reason: option::Option<string::String>, taker_cancellation_reason: option::Option<string::String>): market_types::SettleTradeResult
+
+ + + +
+Implementation + + +
public fun new_settle_trade_result(
+    settled_size: u64,
+    maker_cancellation_reason: Option<String>,
+    taker_cancellation_reason: Option<String>
+): SettleTradeResult {
+    SettleTradeResult {
+        settled_size,
+        maker_cancellation_reason,
+        taker_cancellation_reason
+    }
+}
+
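+A small sketch of a partial settlement result; the reason string is hypothetical:
+
+```move
+// Sketch only: settle 7 units and ask the market to cancel the maker side.
+let partial = new_settle_trade_result(
+    7,
+    option::some(std::string::utf8(b"Insufficient maker margin")),
+    option::none()
+);
+```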
+ + + +
+ + + +## Function `new_market_clearinghouse_callbacks` + + + +
public fun new_market_clearinghouse_callbacks<M: copy, drop, store>(settle_trade_f: |(address, address, u64, u64, u64, bool, u64, u64, M, M)|market_types::SettleTradeResult has copy + drop, validate_order_placement_f: |(address, u64, bool, bool, u64, u64, M)|bool has copy + drop, place_maker_order_f: |(address, u64, bool, u64, u64, M)| has copy + drop, cleanup_order_f: |(address, u64, bool, u64)| has copy + drop, decrease_order_size_f: |(address, u64, bool, u64, u64)| has copy + drop): market_types::MarketClearinghouseCallbacks<M>
+
+ + + +
+Implementation + + +
public fun new_market_clearinghouse_callbacks<M: store + copy + drop>(
+    // settle_trade_f arguments: taker, maker, taker_order_id, maker_order_id, fill_id, is_taker_long, price, size, taker_metadata, maker_metadata
+    settle_trade_f: |address, address, u64, u64, u64, bool, u64, u64, M, M| SettleTradeResult has drop + copy,
+    // validate_order_placement_f arguments: account, order_id, is_taker, is_bid, price, size, order_metadata
+    validate_order_placement_f: |address, u64, bool, bool, u64, u64, M| bool has drop + copy,
+    place_maker_order_f: |address, u64, bool, u64, u64, M| has drop + copy,
+    cleanup_order_f: |address, u64, bool, u64| has drop + copy,
+    decrease_order_size_f: |address, u64, bool, u64, u64| has drop + copy,
+): MarketClearinghouseCallbacks<M> {
+    MarketClearinghouseCallbacks {
+        settle_trade_f,
+        validate_order_placement_f,
+        place_maker_order_f,
+        cleanup_order_f,
+        decrease_order_size_f
+    }
+}
+
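+A construction sketch, assuming a hypothetical metadata type `TestMeta`: a no-op clearinghouse that accepts
+every order and settles every match in full. A real clearinghouse would update positions and balances inside
+these lambdas.
+
+```move
+// Sketch only: minimal callbacks for tests.
+let callbacks = new_market_clearinghouse_callbacks<TestMeta>(
+    |_taker, _maker, _taker_oid, _maker_oid, _fill_id, _is_bid, _price, size, _t_md, _m_md|
+        new_settle_trade_result(size, option::none(), option::none()),
+    |_account, _order_id, _is_taker, _is_bid, _price, _size, _md| true,
+    |_account, _order_id, _is_bid, _price, _size, _md| {},
+    |_account, _order_id, _is_bid, _remaining_size| {},
+    |_account, _order_id, _is_bid, _price, _size| {}
+);
+```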
+ + + +
+ + + +## Function `get_settled_size` + + + +
public fun get_settled_size(self: &market_types::SettleTradeResult): u64
+
+ + + +
+Implementation + + +
public fun get_settled_size(self: &SettleTradeResult): u64 {
+    self.settled_size
+}
+
+ + + +
+ + + +## Function `get_maker_cancellation_reason` + + + +
public fun get_maker_cancellation_reason(self: &market_types::SettleTradeResult): option::Option<string::String>
+
+ + + +
+Implementation + + +
public fun get_maker_cancellation_reason(self: &SettleTradeResult): Option<String> {
+    self.maker_cancellation_reason
+}
+
+ + + +
+ + + +## Function `get_taker_cancellation_reason` + + + +
public fun get_taker_cancellation_reason(self: &market_types::SettleTradeResult): option::Option<string::String>
+
+ + + +
+Implementation + + +
public fun get_taker_cancellation_reason(self: &SettleTradeResult): Option<String> {
+    self.taker_cancellation_reason
+}
+
+ + + +
+ + + +## Function `settle_trade` + + + +
public fun settle_trade<M: copy, drop, store>(self: &market_types::MarketClearinghouseCallbacks<M>, taker: address, maker: address, taker_order_id: u64, maker_order_id: u64, fill_id: u64, is_taker_long: bool, price: u64, size: u64, taker_metadata: M, maker_metadata: M): market_types::SettleTradeResult
+
+ + + +
+Implementation + + +
public fun settle_trade<M: store + copy + drop>(
+    self: &MarketClearinghouseCallbacks<M>,
+    taker: address,
+    maker: address,
+    taker_order_id: u64,
+    maker_order_id: u64,
+    fill_id: u64,
+    is_taker_long: bool,
+    price: u64,
+    size: u64,
+    taker_metadata: M,
+    maker_metadata: M): SettleTradeResult {
+    (self.settle_trade_f)(taker, maker, taker_order_id, maker_order_id, fill_id, is_taker_long, price, size, taker_metadata, maker_metadata)
+}
+
+ + + +
+ + + +## Function `validate_order_placement` + + + +
public fun validate_order_placement<M: copy, drop, store>(self: &market_types::MarketClearinghouseCallbacks<M>, account: address, order_id: u64, is_taker: bool, is_bid: bool, price: u64, size: u64, order_metadata: M): bool
+
+ + + +
+Implementation + + +
public fun validate_order_placement<M: store + copy + drop>(
+    self: &MarketClearinghouseCallbacks<M>,
+    account: address,
+    order_id: u64,
+    is_taker: bool,
+    is_bid: bool,
+    price: u64,
+    size: u64,
+    order_metadata: M): bool {
+    (self.validate_order_placement_f)(account, order_id, is_taker, is_bid, price, size, order_metadata)
+}
+
+ + + +
+ + + +## Function `place_maker_order` + + + +
public fun place_maker_order<M: copy, drop, store>(self: &market_types::MarketClearinghouseCallbacks<M>, account: address, order_id: u64, is_bid: bool, price: u64, size: u64, order_metadata: M)
+
+ + + +
+Implementation + + +
public fun place_maker_order<M: store + copy + drop>(
+    self: &MarketClearinghouseCallbacks<M>,
+    account: address,
+    order_id: u64,
+    is_bid: bool,
+    price: u64,
+    size: u64,
+    order_metadata: M) {
+    (self.place_maker_order_f)(account, order_id, is_bid, price, size, order_metadata)
+}
+
+ + + +
+ + + +## Function `cleanup_order` + + + +
public fun cleanup_order<M: copy, drop, store>(self: &market_types::MarketClearinghouseCallbacks<M>, account: address, order_id: u64, is_bid: bool, remaining_size: u64)
+
+ + + +
+Implementation + + +
public fun cleanup_order<M: store + copy + drop>(
+    self: &MarketClearinghouseCallbacks<M>,
+    account: address,
+    order_id: u64,
+    is_bid: bool,
+    remaining_size: u64) {
+    (self.cleanup_order_f)(account, order_id, is_bid, remaining_size)
+}
+
+ + + +
+ + + +## Function `decrease_order_size` + + + +
public fun decrease_order_size<M: copy, drop, store>(self: &market_types::MarketClearinghouseCallbacks<M>, account: address, order_id: u64, is_bid: bool, price: u64, size: u64)
+
+ + + +
+Implementation + + +
public fun decrease_order_size<M: store + copy + drop>(
+    self: &MarketClearinghouseCallbacks<M>,
+    account: address,
+    order_id: u64,
+    is_bid: bool,
+    price: u64,
+    size: u64) {
+    (self.decrease_order_size_f)(account, order_id, is_bid, price, size)
+}
+
+ + + +
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/aptos-experimental/doc/order_book.md b/aptos-move/framework/aptos-experimental/doc/order_book.md new file mode 100644 index 0000000000000..bde80d0e7a1e7 --- /dev/null +++ b/aptos-move/framework/aptos-experimental/doc/order_book.md @@ -0,0 +1,1029 @@ + + + +# Module `0x7::order_book` + +This module provides the core order book functionality for a trading system. At a high level, it has three major +components: +1. ActiveOrderBook: This is the main order book that keeps track of active orders and their states. The active order +book is backed by a BigOrderedMap, a data structure that allows for efficient insertion, deletion, and matching of orders. +Orders are matched based on time-price priority. +2. PendingOrderBookIndex: This keeps track of pending orders. The pending orders are those that are not active yet. Three +types of pending orders are supported: +- Price move up - Triggered when the price moves above a certain price level +- Price move down - Triggered when the price moves below a certain price level +- Time based - Triggered when a certain time has passed +3. Orders: This is a BigOrderedMap of order id to order details. + + +- [Struct `OrderRequest`](#0x7_order_book_OrderRequest) +- [Enum `OrderBook`](#0x7_order_book_OrderBook) +- [Enum `OrderType`](#0x7_order_book_OrderType) +- [Struct `TestMetadata`](#0x7_order_book_TestMetadata) +- [Constants](#@Constants_0) +- [Function `new_order_request`](#0x7_order_book_new_order_request) +- [Function `new_order_book`](#0x7_order_book_new_order_book) +- [Function `cancel_order`](#0x7_order_book_cancel_order) +- [Function `is_taker_order`](#0x7_order_book_is_taker_order) +- [Function `place_maker_order`](#0x7_order_book_place_maker_order) +- [Function `reinsert_maker_order`](#0x7_order_book_reinsert_maker_order) +- [Function `place_pending_maker_order`](#0x7_order_book_place_pending_maker_order) +- [Function `get_single_match_for_taker`](#0x7_order_book_get_single_match_for_taker) +- [Function `decrease_order_size`](#0x7_order_book_decrease_order_size) +- [Function `is_active_order`](#0x7_order_book_is_active_order) +- [Function `get_order`](#0x7_order_book_get_order) +- [Function `get_remaining_size`](#0x7_order_book_get_remaining_size) +- [Function `take_ready_price_based_orders`](#0x7_order_book_take_ready_price_based_orders) +- [Function `best_bid_price`](#0x7_order_book_best_bid_price) +- [Function `best_ask_price`](#0x7_order_book_best_ask_price) +- [Function `get_slippage_price`](#0x7_order_book_get_slippage_price) +- [Function `take_ready_time_based_orders`](#0x7_order_book_take_ready_time_based_orders) +- [Function `place_order_and_get_matches`](#0x7_order_book_place_order_and_get_matches) + + +
use 0x1::big_ordered_map;
+use 0x1::error;
+use 0x1::option;
+use 0x1::vector;
+use 0x7::active_order_book;
+use 0x7::order_book_types;
+use 0x7::pending_order_book_index;
+
+ + + + + +## Struct `OrderRequest` + + + +
struct OrderRequest<M: copy, drop, store> has copy, drop
+
+ + + +
+Fields + + +
+
+account: address +
+
+ +
+
+account_order_id: u64 +
+
+ +
+
+unique_priority_idx: option::Option<order_book_types::UniqueIdxType> +
+
+ +
+
+price: u64 +
+
+ +
+
+orig_size: u64 +
+
+ +
+
+remaining_size: u64 +
+
+ +
+
+is_buy: bool +
+
+ +
+
+trigger_condition: option::Option<order_book_types::TriggerCondition> +
+
+ +
+
+metadata: M +
+
+ +
+
+ + +
+ + + +## Enum `OrderBook` + + + +
enum OrderBook<M: copy, drop, store> has store
+
+ + + +
+Variants + + +
+V1 + + +
+Fields + + +
+
+orders: big_ordered_map::BigOrderedMap<order_book_types::OrderIdType, order_book_types::OrderWithState<M>> +
+
+ +
+
+active_orders: active_order_book::ActiveOrderBook +
+
+ +
+
+pending_orders: pending_order_book_index::PendingOrderBookIndex +
+
+ +
+
+ + +
+ +
+ +
+ + + +## Enum `OrderType` + + + +
enum OrderType has copy, drop, store
+
+ + + +
+Variants + + +
+GoodTilCancelled + + +
+Fields + + +
+
+ + +
+ +
+ +
+PostOnly + + +
+Fields + + +
+
+ + +
+ +
+ +
+FillOrKill + + +
+Fields + + +
+
+ + +
+ +
+ +
+ + + +## Struct `TestMetadata` + + + +
struct TestMetadata has copy, drop, store
+
+ + + +
+Fields + + +
+
+dummy_field: bool +
+
+ +
+
+ + +
+ + + +## Constants + + + + + + +
const U256_MAX: u256 = 115792089237316195423570985008687907853269984665640564039457584007913129639935;
+
+ + + + + + + +
const EORDER_ALREADY_EXISTS: u64 = 1;
+
+ + + + + + + +
const EINVALID_ADD_SIZE_TO_ORDER: u64 = 6;
+
+ + + + + + + +
const EINVALID_INACTIVE_ORDER_STATE: u64 = 5;
+
+ + + + + + + +
const EORDER_NOT_FOUND: u64 = 4;
+
+ + + + + + + +
const EPOST_ONLY_FILLED: u64 = 2;
+
+ + + + + + + +
const E_NOT_ACTIVE_ORDER: u64 = 7;
+
+ + + + + +## Function `new_order_request` + + + +
public fun new_order_request<M: copy, drop, store>(account: address, account_order_id: u64, unique_priority_idx: option::Option<order_book_types::UniqueIdxType>, price: u64, orig_size: u64, remaining_size: u64, is_buy: bool, trigger_condition: option::Option<order_book_types::TriggerCondition>, metadata: M): order_book::OrderRequest<M>
+
+ + + +
+Implementation + + +
public fun new_order_request<M: store + copy + drop>(
+    account: address,
+    account_order_id: u64,
+    unique_priority_idx: Option<UniqueIdxType>,
+    price: u64,
+    orig_size: u64,
+    remaining_size: u64,
+    is_buy: bool,
+    trigger_condition: Option<TriggerCondition>,
+    metadata: M
+): OrderRequest<M> {
+    OrderRequest {
+        account,
+        account_order_id,
+        unique_priority_idx,
+        price,
+        orig_size,
+        remaining_size,
+        is_buy,
+        trigger_condition,
+        metadata
+    }
+}
+
+ + + +
+ + + +## Function `new_order_book` + + + +
public fun new_order_book<M: copy, drop, store>(): order_book::OrderBook<M>
+
+ + + +
+Implementation + + +
public fun new_order_book<M: store + copy + drop>(): OrderBook<M> {
+    OrderBook::V1 {
+        orders: new_default_big_ordered_map(),
+        active_orders: new_active_order_book(),
+        pending_orders: new_pending_order_book_index()
+    }
+}
+
+ + + +
+ + + +## Function `cancel_order` + +Cancels an order from the order book. If the order is active, it is removed from the active order book; otherwise +it is removed from the pending order book. The API currently aborts if the order is not found in the order book - +making this non-aborting is a TODO for now. + + +
public fun cancel_order<M: copy, drop, store>(self: &mut order_book::OrderBook<M>, account: address, account_order_id: u64): option::Option<order_book_types::Order<M>>
+
+ + + +
+Implementation + + +
public fun cancel_order<M: store + copy + drop>(
+    self: &mut OrderBook<M>, account: address, account_order_id: u64
+): Option<Order<M>> {
+    let order_id = new_order_id_type(account, account_order_id);
+    assert!(self.orders.contains(&order_id), EORDER_NOT_FOUND);
+    let order_with_state = self.orders.remove(&order_id);
+    let (order, is_active) = order_with_state.destroy_order_from_state();
+    if (is_active) {
+        let (_, unique_priority_idx, bid_price, _orig_size, _size, is_buy, _, _) =
+            order.destroy_order();
+        self.active_orders.cancel_active_order(bid_price, unique_priority_idx, is_buy);
+    } else {
+        let (
+            _,
+            unique_priority_idx,
+            _bid_price,
+            _orig_size,
+            _size,
+            is_buy,
+            trigger_condition,
+            _
+        ) = order.destroy_order();
+        self.pending_orders.cancel_pending_order(
+            trigger_condition.destroy_some(), unique_priority_idx, is_buy
+        );
+    };
+    return option::some(order)
+}
+
+ + + +
+ + + +## Function `is_taker_order` + +Checks if the order is a taker order i.e., matched immediatedly with the active order book. + + +
public fun is_taker_order<M: copy, drop, store>(self: &order_book::OrderBook<M>, price: u64, is_buy: bool, trigger_condition: option::Option<order_book_types::TriggerCondition>): bool
+
+ + + +
+Implementation + + +
public fun is_taker_order<M: store + copy + drop>(
+    self: &OrderBook<M>,
+    price: u64,
+    is_buy: bool,
+    trigger_condition: Option<TriggerCondition>
+): bool {
+    if (trigger_condition.is_some()) {
+        return false;
+    };
+    return self.active_orders.is_taker_order(price, is_buy)
+}
+
+ + + +
+ + + +## Function `place_maker_order` + +Places a maker order in the order book. If the order is a pending order, it is added to the pending order book; +otherwise it is added to the active order book. The API aborts if it is not a maker order or if the order already exists. +A placement sketch follows the implementation below. + + +
public fun place_maker_order<M: copy, drop, store>(self: &mut order_book::OrderBook<M>, order_req: order_book::OrderRequest<M>)
+
+ + + +
+Implementation + + +
public fun place_maker_order<M: store + copy + drop>(
+    self: &mut OrderBook<M>, order_req: OrderRequest<M>
+) {
+    if (order_req.trigger_condition.is_some()) {
+        return self.place_pending_maker_order(order_req);
+    };
+
+    let order_id = new_order_id_type(order_req.account, order_req.account_order_id);
+    let unique_priority_idx =
+        if (order_req.unique_priority_idx.is_some()) {
+            order_req.unique_priority_idx.destroy_some()
+        } else {
+            generate_unique_idx_fifo_tiebraker()
+        };
+
+    assert!(
+        !self.orders.contains(&order_id),
+        error::invalid_argument(EORDER_ALREADY_EXISTS)
+    );
+
+    let order =
+        new_order(
+            order_id,
+            unique_priority_idx,
+            order_req.price,
+            order_req.orig_size,
+            order_req.remaining_size,
+            order_req.is_buy,
+            order_req.trigger_condition,
+            order_req.metadata
+        );
+    self.orders.add(order_id, new_order_with_state(order, true));
+    self.active_orders.place_maker_order(
+        order_id,
+        order_req.price,
+        unique_priority_idx,
+        order_req.remaining_size,
+        order_req.is_buy
+    );
+}
+
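+A placement sketch, assuming `book: &mut OrderBook<TestMeta>` with a hypothetical metadata type `TestMeta`;
+markets normally go through the higher-level market module rather than calling this directly:
+
+```move
+// Sketch only: rest a bid of size 5 at price 100 directly on the order book.
+let req = new_order_request(
+    @0xcafe,         // account
+    1,               // account_order_id
+    option::none(),  // unique_priority_idx (a FIFO tiebreaker is generated internally)
+    100,             // price
+    5,               // orig_size
+    5,               // remaining_size
+    true,            // is_buy
+    option::none(),  // trigger_condition (none => active order)
+    TestMeta {}
+);
+book.place_maker_order(req);
+```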
+ + + +
+ + + +## Function `reinsert_maker_order` + +Reinserts a maker order into the order book. This is used when the order has been removed from the order book +but the clearinghouse fails to settle all or part of it. If the order doesn't exist in the order book, +it is added; if it exists, its size is updated. + + +
public fun reinsert_maker_order<M: copy, drop, store>(self: &mut order_book::OrderBook<M>, order_req: order_book::OrderRequest<M>)
+
+ + + +
+Implementation + + +
public fun reinsert_maker_order<M: store + copy + drop>(
+    self: &mut OrderBook<M>, order_req: OrderRequest<M>
+) {
+    assert!(order_req.trigger_condition.is_none(), E_NOT_ACTIVE_ORDER);
+    let order_id = new_order_id_type(order_req.account, order_req.account_order_id);
+    if (!self.orders.contains(&order_id)) {
+        return self.place_maker_order(order_req);
+    };
+    let order_with_state = self.orders.remove(&order_id);
+    order_with_state.increase_remaining_size(order_req.remaining_size);
+    self.orders.add(order_id, order_with_state);
+    self.active_orders.increase_order_size(
+        order_req.price,
+        order_req.unique_priority_idx.destroy_some(),
+        order_req.remaining_size,
+        order_req.is_buy
+    );
+}
+
+ + + +
+ + + +## Function `place_pending_maker_order` + + + +
fun place_pending_maker_order<M: copy, drop, store>(self: &mut order_book::OrderBook<M>, order_req: order_book::OrderRequest<M>)
+
+ + + +
+Implementation + + +
fun place_pending_maker_order<M: store + copy + drop>(
+    self: &mut OrderBook<M>, order_req: OrderRequest<M>
+) {
+    let order_id = new_order_id_type(order_req.account, order_req.account_order_id);
+    let unique_priority_idx =
+        if (order_req.unique_priority_idx.is_some()) {
+            order_req.unique_priority_idx.destroy_some()
+        } else {
+            generate_unique_idx_fifo_tiebraker()
+        };
+    let order =
+        new_order(
+            order_id,
+            unique_priority_idx,
+            order_req.price,
+            order_req.orig_size,
+            order_req.remaining_size,
+            order_req.is_buy,
+            order_req.trigger_condition,
+            order_req.metadata
+        );
+
+    self.orders.add(order_id, new_order_with_state(order, false));
+
+    self.pending_orders.place_pending_maker_order(
+        order_id,
+        order_req.trigger_condition.destroy_some(),
+        unique_priority_idx,
+        order_req.is_buy
+    );
+}
+
+ + + +
+ + + +## Function `get_single_match_for_taker` + +Returns a single match for a taker order. It is the responsibility of the caller to first call the is_taker_order +API to ensure that the order is a taker order before calling this API; otherwise it will abort. +A matching-loop sketch follows the implementation below. + + +
public fun get_single_match_for_taker<M: copy, drop, store>(self: &mut order_book::OrderBook<M>, price: u64, size: u64, is_buy: bool): order_book_types::SingleOrderMatch<M>
+
+ + + +
+Implementation + + +
public fun get_single_match_for_taker<M: store + copy + drop>(
+    self: &mut OrderBook<M>,
+    price: u64,
+    size: u64,
+    is_buy: bool
+): SingleOrderMatch<M> {
+    let result = self.active_orders.get_single_match_result(price, size, is_buy);
+    let (order_id, matched_size, remaining_size) =
+        result.destroy_active_matched_order();
+    let order_with_state = self.orders.remove(&order_id);
+    order_with_state.set_remaining_size(remaining_size);
+    if (remaining_size > 0) {
+        self.orders.add(order_id, order_with_state);
+    };
+    let (order, is_active) = order_with_state.destroy_order_from_state();
+    assert!(is_active, EINVALID_INACTIVE_ORDER_STATE);
+    new_single_order_match(order, matched_size)
+}
+
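+A matching-loop sketch, assuming `book: &mut OrderBook<TestMeta>` already holds resting asks and that the
+clearinghouse settles every match in full:
+
+```move
+// Sketch only: walk the book for an aggressive bid of size 10 at price 100.
+let remaining = 10;
+while (remaining > 0 && book.is_taker_order(100, true, option::none())) {
+    let single_match = book.get_single_match_for_taker(100, remaining, true);
+    let (_maker_order, matched_size) = single_match.destroy_single_order_match();
+    // ... settle `matched_size` against the maker in the clearinghouse ...
+    remaining -= matched_size;
+};
+```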
+ + + +
+ + + +## Function `decrease_order_size` + +Decreases the size of the order by the given size delta. The API aborts if the order is not found in the order book or +if the size delta is greater than or equal to the remaining size of the order. Note that when the size delta equals the +remaining size, the API aborts rather than cancelling the order, to avoid unintended cancellation; use the cancel_order +API to cancel an order. + + +
public fun decrease_order_size<M: copy, drop, store>(self: &mut order_book::OrderBook<M>, account: address, account_order_id: u64, size_delta: u64)
+
+ + + +
+Implementation + + +
public fun decrease_order_size<M: store + copy + drop>(
+    self: &mut OrderBook<M>, account: address, account_order_id: u64, size_delta: u64
+) {
+    let order_id = new_order_id_type(account, account_order_id);
+    assert!(self.orders.contains(&order_id), EORDER_NOT_FOUND);
+    let order_with_state = self.orders.remove(&order_id);
+    order_with_state.decrease_remaining_size(size_delta);
+    if (order_with_state.is_active_order()) {
+        let order = order_with_state.get_order_from_state();
+        self.active_orders.decrease_order_size(
+            order.get_price(),
+            order_with_state.get_unique_priority_idx_from_state(),
+            size_delta,
+            order.is_bid()
+        );
+    };
+    self.orders.add(order_id, order_with_state);
+}
+
+ + + +
+ + + +## Function `is_active_order` + + + +
public fun is_active_order<M: copy, drop, store>(self: &order_book::OrderBook<M>, account: address, account_order_id: u64): bool
+
+ + + +
+Implementation + + +
public fun is_active_order<M: store + copy + drop>(
+    self: &OrderBook<M>, account: address, account_order_id: u64
+): bool {
+    let order_id = new_order_id_type(account, account_order_id);
+    if (!self.orders.contains(&order_id)) {
+        return false;
+    };
+    self.orders.borrow(&order_id).is_active_order()
+}
+
+ + + +
+ + + +## Function `get_order` + + + +
public fun get_order<M: copy, drop, store>(self: &order_book::OrderBook<M>, account: address, account_order_id: u64): option::Option<order_book_types::OrderWithState<M>>
+
+ + + +
+Implementation + + +
public fun get_order<M: store + copy + drop>(
+    self: &OrderBook<M>, account: address, account_order_id: u64
+): Option<OrderWithState<M>> {
+    let order_id = new_order_id_type(account, account_order_id);
+    if (!self.orders.contains(&order_id)) {
+        return option::none();
+    };
+    option::some(*self.orders.borrow(&order_id))
+}
+
+ + + +
+ + + +## Function `get_remaining_size` + + + +
public fun get_remaining_size<M: copy, drop, store>(self: &order_book::OrderBook<M>, account: address, account_order_id: u64): u64
+
+ + + +
+Implementation + + +
public fun get_remaining_size<M: store + copy + drop>(
+    self: &OrderBook<M>, account: address, account_order_id: u64
+): u64 {
+    let order_id = new_order_id_type(account, account_order_id);
+    if (!self.orders.contains(&order_id)) {
+        return 0;
+    };
+    self.orders.borrow(&order_id).get_remaining_size_from_state()
+}
+
+ + + +
+ + + +## Function `take_ready_price_based_orders` + +Removes and returns the orders that are ready to be executed based on the current price. + + +
public fun take_ready_price_based_orders<M: copy, drop, store>(self: &mut order_book::OrderBook<M>, current_price: u64): vector<order_book_types::Order<M>>
+
+ + + +
+Implementation + + +
public fun take_ready_price_based_orders<M: store + copy + drop>(
+    self: &mut OrderBook<M>, current_price: u64
+): vector<Order<M>> {
+    let self_orders = &mut self.orders;
+    let order_ids = self.pending_orders.take_ready_price_based_orders(current_price);
+    let orders = vector::empty();
+
+    order_ids.for_each(|order_id| {
+        let order_with_state = self_orders.remove(&order_id);
+        let (order, _) = order_with_state.destroy_order_from_state();
+        orders.push_back(order);
+    });
+    orders
+}
+
+ + + +
+ + + +## Function `best_bid_price` + + + +
public fun best_bid_price<M: copy, drop, store>(self: &order_book::OrderBook<M>): option::Option<u64>
+
+ + + +
+Implementation + + +
public fun best_bid_price<M: store + copy + drop>(self: &OrderBook<M>): Option<u64> {
+    self.active_orders.best_bid_price()
+}
+
+ + + +
+ + + +## Function `best_ask_price` + + + +
public fun best_ask_price<M: copy, drop, store>(self: &order_book::OrderBook<M>): option::Option<u64>
+
+ + + +
+Implementation + + +
public fun best_ask_price<M: store + copy + drop>(self: &OrderBook<M>): Option<u64> {
+    self.active_orders.best_ask_price()
+}
+
+ + + +
+ + + +## Function `get_slippage_price` + + + +
public fun get_slippage_price<M: copy, drop, store>(self: &order_book::OrderBook<M>, is_buy: bool, slippage_pct: u64): option::Option<u64>
+
+ + + +
+Implementation + + +
public fun get_slippage_price<M: store + copy + drop>(
+    self: &OrderBook<M>, is_buy: bool, slippage_pct: u64
+): Option<u64> {
+    self.active_orders.get_slippage_price(is_buy, slippage_pct)
+}
+
+ + + +
+ + + +## Function `take_ready_time_based_orders` + +Removes and returns the orders that are ready to be executed based on the time condition. + + +
public fun take_ready_time_based_orders<M: copy, drop, store>(self: &mut order_book::OrderBook<M>): vector<order_book_types::Order<M>>
+
+ + + +
+Implementation + + +
public fun take_ready_time_based_orders<M: store + copy + drop>(
+    self: &mut OrderBook<M>
+): vector<Order<M>> {
+    let self_orders = &mut self.orders;
+    let order_ids = self.pending_orders.take_time_time_based_orders();
+    let orders = vector::empty();
+
+    order_ids.for_each(|order_id| {
+        let order_with_state = self_orders.remove(&order_id);
+        let (order, _) = order_with_state.destroy_order_from_state();
+        orders.push_back(order);
+    });
+    orders
+}
+
+ + + +
+ + + +## Function `place_order_and_get_matches` + + + +
public fun place_order_and_get_matches<M: copy, drop, store>(self: &mut order_book::OrderBook<M>, order_req: order_book::OrderRequest<M>): vector<order_book_types::SingleOrderMatch<M>>
+
+ + + +
+Implementation + + +
public fun place_order_and_get_matches<M: store + copy + drop>(
+    self: &mut OrderBook<M>, order_req: OrderRequest<M>
+): vector<SingleOrderMatch<M>> {
+    let match_results = vector::empty();
+    let remaining_size = order_req.remaining_size;
+    while (remaining_size > 0) {
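+        // If the remainder no longer crosses the book, park it as a maker order and stop matching.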
+        if (!self.is_taker_order(order_req.price, order_req.is_buy, order_req.trigger_condition)) {
+            self.place_maker_order(
+                OrderRequest {
+                    account: order_req.account,
+                    account_order_id: order_req.account_order_id,
+                    unique_priority_idx: option::none(),
+                    price: order_req.price,
+                    orig_size: order_req.orig_size,
+                    remaining_size,
+                    is_buy: order_req.is_buy,
+                    trigger_condition: order_req.trigger_condition,
+                    metadata: order_req.metadata
+                }
+            );
+            return match_results;
+        };
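+        // Otherwise match against the best opposing active order and consume the matched size.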
+        let match_result =
+            self.get_single_match_for_taker(
+                order_req.price, remaining_size, order_req.is_buy
+            );
+        let matched_size = match_result.get_matched_size();
+        match_results.push_back(match_result);
+        remaining_size -= matched_size;
+    };
+    return match_results
+}
+
+ + + +
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/aptos-experimental/doc/order_book_types.md b/aptos-move/framework/aptos-experimental/doc/order_book_types.md new file mode 100644 index 0000000000000..885eaf1beee35 --- /dev/null +++ b/aptos-move/framework/aptos-experimental/doc/order_book_types.md @@ -0,0 +1,1461 @@ + + + +# Module `0x7::order_book_types` + +(work in progress) + + +- [Struct `OrderIdType`](#0x7_order_book_types_OrderIdType) +- [Struct `UniqueIdxType`](#0x7_order_book_types_UniqueIdxType) +- [Struct `ActiveMatchedOrder`](#0x7_order_book_types_ActiveMatchedOrder) +- [Struct `SingleOrderMatch`](#0x7_order_book_types_SingleOrderMatch) +- [Struct `Order`](#0x7_order_book_types_Order) +- [Enum `TriggerCondition`](#0x7_order_book_types_TriggerCondition) +- [Struct `OrderWithState`](#0x7_order_book_types_OrderWithState) +- [Constants](#@Constants_0) +- [Function `new_default_big_ordered_map`](#0x7_order_book_types_new_default_big_ordered_map) +- [Function `get_slippage_pct_precision`](#0x7_order_book_types_get_slippage_pct_precision) +- [Function `new_time_based_trigger_condition`](#0x7_order_book_types_new_time_based_trigger_condition) +- [Function `new_order_id_type`](#0x7_order_book_types_new_order_id_type) +- [Function `generate_unique_idx_fifo_tiebraker`](#0x7_order_book_types_generate_unique_idx_fifo_tiebraker) +- [Function `new_unique_idx_type`](#0x7_order_book_types_new_unique_idx_type) +- [Function `descending_idx`](#0x7_order_book_types_descending_idx) +- [Function `new_active_matched_order`](#0x7_order_book_types_new_active_matched_order) +- [Function `destroy_active_matched_order`](#0x7_order_book_types_destroy_active_matched_order) +- [Function `new_order`](#0x7_order_book_types_new_order) +- [Function `new_single_order_match`](#0x7_order_book_types_new_single_order_match) +- [Function `get_active_matched_size`](#0x7_order_book_types_get_active_matched_size) +- [Function `get_matched_size`](#0x7_order_book_types_get_matched_size) +- [Function `new_order_with_state`](#0x7_order_book_types_new_order_with_state) +- [Function `tp_trigger_condition`](#0x7_order_book_types_tp_trigger_condition) +- [Function `sl_trigger_condition`](#0x7_order_book_types_sl_trigger_condition) +- [Function `index`](#0x7_order_book_types_index) +- [Function `get_order_from_state`](#0x7_order_book_types_get_order_from_state) +- [Function `get_metadata_from_state`](#0x7_order_book_types_get_metadata_from_state) +- [Function `get_order_id`](#0x7_order_book_types_get_order_id) +- [Function `get_unique_priority_idx`](#0x7_order_book_types_get_unique_priority_idx) +- [Function `get_metadata_from_order`](#0x7_order_book_types_get_metadata_from_order) +- [Function `get_trigger_condition_from_order`](#0x7_order_book_types_get_trigger_condition_from_order) +- [Function `increase_remaining_size`](#0x7_order_book_types_increase_remaining_size) +- [Function `decrease_remaining_size`](#0x7_order_book_types_decrease_remaining_size) +- [Function `set_remaining_size`](#0x7_order_book_types_set_remaining_size) +- [Function `get_remaining_size_from_state`](#0x7_order_book_types_get_remaining_size_from_state) +- [Function `get_unique_priority_idx_from_state`](#0x7_order_book_types_get_unique_priority_idx_from_state) +- [Function `get_remaining_size`](#0x7_order_book_types_get_remaining_size) +- [Function `get_orig_size`](#0x7_order_book_types_get_orig_size) +- [Function `destroy_order_from_state`](#0x7_order_book_types_destroy_order_from_state) +- [Function 
`destroy_active_match_order`](#0x7_order_book_types_destroy_active_match_order) +- [Function `destroy_order`](#0x7_order_book_types_destroy_order) +- [Function `destroy_single_order_match`](#0x7_order_book_types_destroy_single_order_match) +- [Function `destroy_order_id_type`](#0x7_order_book_types_destroy_order_id_type) +- [Function `is_active_order`](#0x7_order_book_types_is_active_order) +- [Function `get_price`](#0x7_order_book_types_get_price) +- [Function `is_bid`](#0x7_order_book_types_is_bid) + + +
use 0x1::bcs;
+use 0x1::big_ordered_map;
+use 0x1::from_bcs;
+use 0x1::option;
+use 0x1::transaction_context;
+
+ + + + + +## Struct `OrderIdType` + + + +
struct OrderIdType has copy, drop, store
+
+ + + +
+Fields + + +
+
+account: address +
+
+ +
+
+account_order_id: u64 +
+
+ +
+
+ + +
+ + + +## Struct `UniqueIdxType` + + + +
struct UniqueIdxType has copy, drop, store
+
+ + + +
+Fields + + +
+
+idx: u256 +
+
+ +
+
+ + +
+ + + +## Struct `ActiveMatchedOrder` + + + +
struct ActiveMatchedOrder has copy, drop
+
+ + + +
+Fields + + +
+
+order_id: order_book_types::OrderIdType +
+
+ +
+
+matched_size: u64 +
+
+ +
+
+remaining_size: u64 +
+
+ Remaining size of the maker order +
+
+ + +
+ + + +## Struct `SingleOrderMatch` + + + +
struct SingleOrderMatch<M: copy, drop, store> has copy, drop
+
+ + + +
+Fields + + +
+
+order: order_book_types::Order<M> +
+
+ +
+
+matched_size: u64 +
+
+ +
+
+ + +
+ + + +## Struct `Order` + + + +
struct Order<M: copy, drop, store> has copy, drop, store
+
+ + + +
+Fields + + +
+
+order_id: order_book_types::OrderIdType +
+
+ +
+
+unique_priority_idx: order_book_types::UniqueIdxType +
+
+ +
+
+price: u64 +
+
+ +
+
+orig_size: u64 +
+
+ +
+
+remaining_size: u64 +
+
+ +
+
+is_bid: bool +
+
+ +
+
+trigger_condition: option::Option<order_book_types::TriggerCondition> +
+
+ +
+
+metadata: M +
+
+ +
+
+ + +
+ + + +## Enum `TriggerCondition` + + + +
enum TriggerCondition has copy, drop, store
+
+ + + +
+Variants + + +
+TakeProfit + + +
+Fields + + +
+
+0: u64 +
+
+ +
+
+ + +
+ +
+ +
+StopLoss + + +
+Fields + + +
+
+0: u64 +
+
+ +
+
+ + +
+ +
+ +
+TimeBased + + +
+Fields + + +
+
+0: u64 +
+
+ +
+
+ + +
+ +
+ +
+ + + +## Struct `OrderWithState` + + + +
struct OrderWithState<M: copy, drop, store> has copy, drop, store
+
+ + + +
+Fields + + +
+
+order: order_book_types::Order<M> +
+
+ +
+
+is_active: bool +
+
+ +
+
+ + +
+ + + +## Constants + + + + + + +
const U256_MAX: u256 = 115792089237316195423570985008687907853269984665640564039457584007913129639935;
+
+ + + + + + + +
const BIG_MAP_INNER_DEGREE: u16 = 64;
+
+ + + + + + + +
const BIG_MAP_LEAF_DEGREE: u16 = 32;
+
+ + + + + + + +
const EINVALID_ORDER_SIZE_DECREASE: u64 = 4;
+
+ + + + + + + +
const EINVALID_TRIGGER_CONDITION: u64 = 2;
+
+ + + + + + + +
const EORDER_ALREADY_EXISTS: u64 = 1;
+
+ + + + + + + +
const INVALID_MATCH_RESULT: u64 = 3;
+
+ + + + + + + +
const SLIPPAGE_PCT_PRECISION: u64 = 100;
+
+ + + + + +## Function `new_default_big_ordered_map` + + + +
public(friend) fun new_default_big_ordered_map<K: store, V: store>(): big_ordered_map::BigOrderedMap<K, V>
+
+ + + +
+Implementation + + +
public(friend) fun new_default_big_ordered_map<K: store, V: store>(): BigOrderedMap<K, V> {
+    big_ordered_map::new_with_config(
+        BIG_MAP_INNER_DEGREE,
+        BIG_MAP_LEAF_DEGREE,
+        true
+    )
+}
+
+ + + +
+ + + +## Function `get_slippage_pct_precision` + + + +
public fun get_slippage_pct_precision(): u64
+
+ + + +
+Implementation + + +
public fun get_slippage_pct_precision(): u64 {
+    SLIPPAGE_PCT_PRECISION
+}
+
+ + + +
+ + + +## Function `new_time_based_trigger_condition` + + + +
public fun new_time_based_trigger_condition(time: u64): order_book_types::TriggerCondition
+
+ + + +
+Implementation + + +
public fun new_time_based_trigger_condition(time: u64): TriggerCondition {
+    TriggerCondition::TimeBased(time)
+}
+
+ + + +
+ + + +## Function `new_order_id_type` + + + +
public fun new_order_id_type(account: address, account_order_id: u64): order_book_types::OrderIdType
+
+ + + +
+Implementation + + +
public fun new_order_id_type(account: address, account_order_id: u64): OrderIdType {
+    OrderIdType { account, account_order_id }
+}
+
+ + + +
+ + + +## Function `generate_unique_idx_fifo_tiebraker` + + + +
public fun generate_unique_idx_fifo_tiebraker(): order_book_types::UniqueIdxType
+
+ + + +
+Implementation + + +
public fun generate_unique_idx_fifo_tiebraker(): UniqueIdxType {
+    // TODO change from random to monotonically increasing value
+    new_unique_idx_type(
+        from_bcs::to_u256(
+            bcs::to_bytes(&transaction_context::generate_auid_address())
+        )
+    )
+}
+
+ + + +
+ + + +## Function `new_unique_idx_type` + + + +
public fun new_unique_idx_type(idx: u256): order_book_types::UniqueIdxType
+
+ + + +
+Implementation + + +
public fun new_unique_idx_type(idx: u256): UniqueIdxType {
+    UniqueIdxType { idx }
+}
+
+ + + +
+ + + +## Function `descending_idx` + + + +
public fun descending_idx(self: &order_book_types::UniqueIdxType): order_book_types::UniqueIdxType
+
+ + + +
+Implementation + + +
public fun descending_idx(self: &UniqueIdxType): UniqueIdxType {
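+    // Mapping idx to U256_MAX - idx reverses the ordering: ascending iteration over the result
+    // corresponds to descending iteration over the original index.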
+    UniqueIdxType { idx: U256_MAX - self.idx }
+}
+
+ + + +
+ + + +## Function `new_active_matched_order` + + + +
public fun new_active_matched_order(order_id: order_book_types::OrderIdType, matched_size: u64, remaining_size: u64): order_book_types::ActiveMatchedOrder
+
+ + + +
+Implementation + + +
public fun new_active_matched_order(
+    order_id: OrderIdType, matched_size: u64, remaining_size: u64
+): ActiveMatchedOrder {
+    ActiveMatchedOrder { order_id, matched_size, remaining_size }
+}
+
+ + + +
+ + + +## Function `destroy_active_matched_order` + + + +
public fun destroy_active_matched_order(self: order_book_types::ActiveMatchedOrder): (order_book_types::OrderIdType, u64, u64)
+
+ + + +
+Implementation + + +
public fun destroy_active_matched_order(self: ActiveMatchedOrder): (OrderIdType, u64, u64) {
+    (self.order_id, self.matched_size, self.remaining_size)
+}
+
+ + + +
+ + + +## Function `new_order` + + + +
public fun new_order<M: copy, drop, store>(order_id: order_book_types::OrderIdType, unique_priority_idx: order_book_types::UniqueIdxType, price: u64, orig_size: u64, size: u64, is_buy: bool, trigger_condition: option::Option<order_book_types::TriggerCondition>, metadata: M): order_book_types::Order<M>
+
+ + + +
+Implementation + + +
public fun new_order<M: store + copy + drop>(
+    order_id: OrderIdType,
+    unique_priority_idx: UniqueIdxType,
+    price: u64,
+    orig_size: u64,
+    size: u64,
+    is_buy: bool,
+    trigger_condition: Option<TriggerCondition>,
+    metadata: M
+): Order<M> {
+    Order {
+        order_id,
+        unique_priority_idx,
+        price,
+        orig_size,
+        remaining_size: size,
+        is_bid: is_buy,
+        trigger_condition,
+        metadata
+    }
+}
+
+ + + +
+ + + +## Function `new_single_order_match` + + + +
public fun new_single_order_match<M: copy, drop, store>(order: order_book_types::Order<M>, matched_size: u64): order_book_types::SingleOrderMatch<M>
+
+ + + +
+Implementation + + +
public fun new_single_order_match<M: store + copy + drop>(
+    order: Order<M>, matched_size: u64
+): SingleOrderMatch<M> {
+    SingleOrderMatch { order, matched_size }
+}
+
+ + + +
+ + + +## Function `get_active_matched_size` + + + +
public fun get_active_matched_size(self: &order_book_types::ActiveMatchedOrder): u64
+
+ + + +
+Implementation + + +
public fun get_active_matched_size(self: &ActiveMatchedOrder): u64 {
+    self.matched_size
+}
+
+ + + +
+ + + +## Function `get_matched_size` + + + +
public fun get_matched_size<M: copy, drop, store>(self: &order_book_types::SingleOrderMatch<M>): u64
+
+ + + +
+Implementation + + +
public fun get_matched_size<M: store + copy + drop>(
+    self: &SingleOrderMatch<M>
+): u64 {
+    self.matched_size
+}
+
+ + + +
+ + + +## Function `new_order_with_state` + + + +
public fun new_order_with_state<M: copy, drop, store>(order: order_book_types::Order<M>, is_active: bool): order_book_types::OrderWithState<M>
+
+ + + +
+Implementation + + +
public fun new_order_with_state<M: store + copy + drop>(
+    order: Order<M>, is_active: bool
+): OrderWithState<M> {
+    OrderWithState { order, is_active }
+}
+
+ + + +
+ + + +## Function `tp_trigger_condition` + + + +
public fun tp_trigger_condition(take_profit: u64): order_book_types::TriggerCondition
+
+ + + +
+Implementation + + +
public fun tp_trigger_condition(take_profit: u64): TriggerCondition {
+    TriggerCondition::TakeProfit(take_profit)
+}
+
+ + + +
+ + + +## Function `sl_trigger_condition` + + + +
public fun sl_trigger_condition(stop_loss: u64): order_book_types::TriggerCondition
+
+ + + +
+Implementation + + +
public fun sl_trigger_condition(stop_loss: u64): TriggerCondition {
+    TriggerCondition::StopLoss(stop_loss)
+}
+
+ + + +
+ + + +## Function `index` + + + +
public fun index(self: &order_book_types::TriggerCondition, is_buy: bool): (option::Option<u64>, option::Option<u64>, option::Option<u64>)
+
+ + + +
+Implementation + + +
public fun index(self: &TriggerCondition, is_buy: bool):
+    (Option<u64>, Option<u64>, Option<u64>) {
+    match(self) {
+        TriggerCondition::TakeProfit(tp) => {
+            if (is_buy) {
+                (option::some(*tp), option::none(), option::none())
+            } else {
+                (option::none(), option::some(*tp), option::none())
+            }
+        }
+        TriggerCondition::StopLoss(sl) => {
+            if (is_buy) {
+                (option::none(), option::some(*sl), option::none())
+            } else {
+                (option::some(*sl), option::none(), option::none())
+            }
+        }
+        TriggerCondition::TimeBased(time) => {
+            (option::none(), option::none(), option::some(*time))
+        }
+    }
+}
+
+ + + +
+ + + +## Function `get_order_from_state` + + + +
public fun get_order_from_state<M: copy, drop, store>(self: &order_book_types::OrderWithState<M>): &order_book_types::Order<M>
+
+ + + +
+Implementation + + +
public fun get_order_from_state<M: store + copy + drop>(
+    self: &OrderWithState<M>
+): &Order<M> {
+    &self.order
+}
+
+ + + +
+ + + +## Function `get_metadata_from_state` + + + +
public fun get_metadata_from_state<M: copy, drop, store>(self: &order_book_types::OrderWithState<M>): M
+
+ + + +
+Implementation + + +
public fun get_metadata_from_state<M: store + copy + drop>(
+    self: &OrderWithState<M>
+): M {
+    self.order.metadata
+}
+
+ + + +
+ + + +## Function `get_order_id` + + + +
public fun get_order_id<M: copy, drop, store>(self: &order_book_types::Order<M>): order_book_types::OrderIdType
+
+ + + +
+Implementation + + +
public fun get_order_id<M: store + copy + drop>(self: &Order<M>): OrderIdType {
+    self.order_id
+}
+
+ + + +
+ + + +## Function `get_unique_priority_idx` + + + +
public fun get_unique_priority_idx<M: copy, drop, store>(self: &order_book_types::Order<M>): order_book_types::UniqueIdxType
+
+ + + +
+Implementation + + +
public fun get_unique_priority_idx<M: store + copy + drop>(self: &Order<M>): UniqueIdxType {
+    self.unique_priority_idx
+}
+
+ + + +
+ + + +## Function `get_metadata_from_order` + + + +
public fun get_metadata_from_order<M: copy, drop, store>(self: &order_book_types::Order<M>): M
+
+ + + +
+Implementation + + +
public fun get_metadata_from_order<M: store + copy + drop>(self: &Order<M>): M {
+    self.metadata
+}
+
+ + + +
+ + + +## Function `get_trigger_condition_from_order` + + + +
public fun get_trigger_condition_from_order<M: copy, drop, store>(self: &order_book_types::Order<M>): option::Option<order_book_types::TriggerCondition>
+
+ + + +
+Implementation + + +
public fun get_trigger_condition_from_order<M: store + copy + drop>(
+    self: &Order<M>
+): Option<TriggerCondition> {
+    self.trigger_condition
+}
+
+ + + +
+ + + +## Function `increase_remaining_size` + + + +
public fun increase_remaining_size<M: copy, drop, store>(self: &mut order_book_types::OrderWithState<M>, size: u64)
+
+ + + +
+Implementation + + +
public fun increase_remaining_size<M: store + copy + drop>(
+    self: &mut OrderWithState<M>, size: u64
+) {
+    self.order.remaining_size += size;
+}
+
+ + + +
+ + + +## Function `decrease_remaining_size` + + + +
public fun decrease_remaining_size<M: copy, drop, store>(self: &mut order_book_types::OrderWithState<M>, size: u64)
+
+ + + +
+Implementation + + +
public fun decrease_remaining_size<M: store + copy + drop>(
+    self: &mut OrderWithState<M>, size: u64
+) {
+    assert!(self.order.remaining_size > size, EINVALID_ORDER_SIZE_DECREASE);
+    self.order.remaining_size -= size;
+}
+
+ + + +
+ + + +## Function `set_remaining_size` + + + +
public fun set_remaining_size<M: copy, drop, store>(self: &mut order_book_types::OrderWithState<M>, remaining_size: u64)
+
+ + + +
+Implementation + + +
public fun set_remaining_size<M: store + copy + drop>(
+    self: &mut OrderWithState<M>, remaining_size: u64
+) {
+    self.order.remaining_size = remaining_size;
+}
+
+ + + +
+ + + +## Function `get_remaining_size_from_state` + + + +
public fun get_remaining_size_from_state<M: copy, drop, store>(self: &order_book_types::OrderWithState<M>): u64
+
+ + + +
+Implementation + + +
public fun get_remaining_size_from_state<M: store + copy + drop>(
+    self: &OrderWithState<M>
+): u64 {
+    self.order.remaining_size
+}
+
+ + + +
+ + + +## Function `get_unique_priority_idx_from_state` + + + +
public fun get_unique_priority_idx_from_state<M: copy, drop, store>(self: &order_book_types::OrderWithState<M>): order_book_types::UniqueIdxType
+
+ + + +
+Implementation + + +
public fun get_unique_priority_idx_from_state<M: store + copy + drop>(
+    self: &OrderWithState<M>
+): UniqueIdxType {
+    self.order.unique_priority_idx
+}
+
+ + + +
+ + + +## Function `get_remaining_size` + + + +
public fun get_remaining_size<M: copy, drop, store>(self: &order_book_types::Order<M>): u64
+
+ + + +
+Implementation + + +
public fun get_remaining_size<M: store + copy + drop>(self: &Order<M>): u64 {
+    self.remaining_size
+}
+
+ + + +
+ + + +## Function `get_orig_size` + + + +
public fun get_orig_size<M: copy, drop, store>(self: &order_book_types::Order<M>): u64
+
+ + + +
+Implementation + + +
public fun get_orig_size<M: store + copy + drop>(self: &Order<M>): u64 {
+    self.orig_size
+}
+
+ + + +
+ + + +## Function `destroy_order_from_state` + + + +
public fun destroy_order_from_state<M: copy, drop, store>(self: order_book_types::OrderWithState<M>): (order_book_types::Order<M>, bool)
+
+ + + +
+Implementation + + +
public fun destroy_order_from_state<M: store + copy + drop>(
+    self: OrderWithState<M>
+): (Order<M>, bool) {
+    (self.order, self.is_active)
+}
+
+ + + +
+ + + +## Function `destroy_active_match_order` + + + +
public fun destroy_active_match_order(self: order_book_types::ActiveMatchedOrder): (order_book_types::OrderIdType, u64, u64)
+
+ + + +
+Implementation + + +
public fun destroy_active_match_order(self: ActiveMatchedOrder): (OrderIdType, u64, u64) {
+    (self.order_id, self.matched_size, self.remaining_size)
+}
+
+ + + +
+ + + +## Function `destroy_order` + + + +
public fun destroy_order<M: copy, drop, store>(self: order_book_types::Order<M>): (order_book_types::OrderIdType, order_book_types::UniqueIdxType, u64, u64, u64, bool, option::Option<order_book_types::TriggerCondition>, M)
+
+ + + +
+Implementation + + +
public fun destroy_order<M: store + copy + drop>(
+    self: Order<M>
+): (OrderIdType, UniqueIdxType, u64, u64, u64, bool, Option<TriggerCondition>, M) {
+    (
+        self.order_id,
+        self.unique_priority_idx,
+        self.price,
+        self.orig_size,
+        self.remaining_size,
+        self.is_bid,
+        self.trigger_condition,
+        self.metadata
+    )
+}
+
+ + + +
+ + + +## Function `destroy_single_order_match` + + + +
public fun destroy_single_order_match<M: copy, drop, store>(self: order_book_types::SingleOrderMatch<M>): (order_book_types::Order<M>, u64)
+
+ + + +
+Implementation + + +
public fun destroy_single_order_match<M: store + copy + drop>(
+    self: SingleOrderMatch<M>
+): (Order<M>, u64) {
+    (self.order, self.matched_size)
+}
+
+ + + +
+ + + +## Function `destroy_order_id_type` + + + +
public fun destroy_order_id_type(self: order_book_types::OrderIdType): (address, u64)
+
+ + + +
+Implementation + + +
public fun destroy_order_id_type(self: OrderIdType): (address, u64) {
+    (self.account, self.account_order_id)
+}
+
+ + + +
+ + + +## Function `is_active_order` + + + +
public fun is_active_order<M: copy, drop, store>(self: &order_book_types::OrderWithState<M>): bool
+
+ + + +
+Implementation + + +
public fun is_active_order<M: store + copy + drop>(
+    self: &OrderWithState<M>
+): bool {
+    self.is_active
+}
+
+ + + +
+ + + +## Function `get_price` + + + +
public fun get_price<M: copy, drop, store>(self: &order_book_types::Order<M>): u64
+
+ + + +
+Implementation + + +
public fun get_price<M: store + copy + drop>(self: &Order<M>): u64 {
+    self.price
+}
+
+ + + +
+ + + +## Function `is_bid` + + + +
public fun is_bid<M: copy, drop, store>(self: &order_book_types::Order<M>): bool
+
+ + + +
+Implementation + + +
public fun is_bid<M: store + copy + drop>(self: &Order<M>): bool {
+    self.is_bid
+}
+
+ + + +
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/aptos-experimental/doc/overview.md b/aptos-move/framework/aptos-experimental/doc/overview.md new file mode 100644 index 0000000000000..d2b794233d461 --- /dev/null +++ b/aptos-move/framework/aptos-experimental/doc/overview.md @@ -0,0 +1,34 @@ + + + +# Aptos Experimental Framework + + +This is the reference documentation of the Aptos experimental framework. + + + + +## Index + + +- [`0x7::active_order_book`](active_order_book.md#0x7_active_order_book) +- [`0x7::benchmark_utils`](benchmark_utils.md#0x7_benchmark_utils) +- [`0x7::confidential_asset`](confidential_asset.md#0x7_confidential_asset) +- [`0x7::confidential_balance`](confidential_balance.md#0x7_confidential_balance) +- [`0x7::confidential_proof`](confidential_proof.md#0x7_confidential_proof) +- [`0x7::helpers`](helpers.md#0x7_helpers) +- [`0x7::large_packages`](large_packages.md#0x7_large_packages) +- [`0x7::market`](market.md#0x7_market) +- [`0x7::market_types`](market_types.md#0x7_market_types) +- [`0x7::order_book`](order_book.md#0x7_order_book) +- [`0x7::order_book_types`](order_book_types.md#0x7_order_book_types) +- [`0x7::pending_order_book_index`](pending_order_book_index.md#0x7_pending_order_book_index) +- [`0x7::ristretto255_twisted_elgamal`](ristretto255_twisted_elgamal.md#0x7_ristretto255_twisted_elgamal) +- [`0x7::sigma_protos`](sigma_protos.md#0x7_sigma_protos) +- [`0x7::test_derivable_account_abstraction_ed25519_hex`](test_derivable_account_abstraction_ed25519_hex.md#0x7_test_derivable_account_abstraction_ed25519_hex) +- [`0x7::test_function_values`](test_function_values.md#0x7_test_function_values) +- [`0x7::veiled_coin`](veiled_coin.md#0x7_veiled_coin) + + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/aptos-experimental/doc/pending_order_book_index.md b/aptos-move/framework/aptos-experimental/doc/pending_order_book_index.md new file mode 100644 index 0000000000000..f115008451451 --- /dev/null +++ b/aptos-move/framework/aptos-experimental/doc/pending_order_book_index.md @@ -0,0 +1,322 @@ + + + +# Module `0x7::pending_order_book_index` + +(work in progress) + + +- [Struct `PendingOrderKey`](#0x7_pending_order_book_index_PendingOrderKey) +- [Enum `PendingOrderBookIndex`](#0x7_pending_order_book_index_PendingOrderBookIndex) +- [Function `new_pending_order_book_index`](#0x7_pending_order_book_index_new_pending_order_book_index) +- [Function `cancel_pending_order`](#0x7_pending_order_book_index_cancel_pending_order) +- [Function `place_pending_maker_order`](#0x7_pending_order_book_index_place_pending_maker_order) +- [Function `take_ready_price_based_orders`](#0x7_pending_order_book_index_take_ready_price_based_orders) +- [Function `take_time_time_based_orders`](#0x7_pending_order_book_index_take_time_time_based_orders) + + +
use 0x1::big_ordered_map;
+use 0x1::option;
+use 0x1::timestamp;
+use 0x7::order_book_types;
+
+ + + + + +## Struct `PendingOrderKey` + + + +
struct PendingOrderKey has copy, drop, store
+
+ + + +
+Fields + + +
+
+price: u64 +
+
+ +
+
+tie_breaker: order_book_types::UniqueIdxType +
+
+ +
+
+ + +
+ + + +## Enum `PendingOrderBookIndex` + + + +
enum PendingOrderBookIndex has store
+
+ + + +
+Variants + + +
+V1 + + +
+Fields + + +
+
+price_move_down_index: big_ordered_map::BigOrderedMap<pending_order_book_index::PendingOrderKey, order_book_types::OrderIdType> +
+
+ +
+
+price_move_up_index: big_ordered_map::BigOrderedMap<pending_order_book_index::PendingOrderKey, order_book_types::OrderIdType> +
+
+ +
+
+time_based_index: big_ordered_map::BigOrderedMap<u64, order_book_types::OrderIdType> +
+
+ +
+
+ + +
+ +
+ +
+ + + +## Function `new_pending_order_book_index` + + + +
public(friend) fun new_pending_order_book_index(): pending_order_book_index::PendingOrderBookIndex
+
+ + + +
+Implementation + + +
public(friend) fun new_pending_order_book_index(): PendingOrderBookIndex {
+    PendingOrderBookIndex::V1 {
+        price_move_up_index: new_default_big_ordered_map(),
+        price_move_down_index: new_default_big_ordered_map(),
+        time_based_index: new_default_big_ordered_map()
+    }
+}
+
+ + + +
+ + + +## Function `cancel_pending_order` + + + +
public(friend) fun cancel_pending_order(self: &mut pending_order_book_index::PendingOrderBookIndex, trigger_condition: order_book_types::TriggerCondition, unique_priority_idx: order_book_types::UniqueIdxType, is_buy: bool)
+
+ + + +
+Implementation + + +
public(friend) fun cancel_pending_order(
+    self: &mut PendingOrderBookIndex,
+    trigger_condition: TriggerCondition,
+    unique_priority_idx: UniqueIdxType,
+    is_buy: bool
+) {
+    let (price_move_up_index, price_move_down_index, time_based_index) =
+        trigger_condition.index(is_buy);
+    if (price_move_up_index.is_some()) {
+        self.price_move_up_index.remove(
+            &PendingOrderKey {
+                price: price_move_up_index.destroy_some(),
+                tie_breaker: unique_priority_idx
+            }
+        );
+    };
+    if (price_move_down_index.is_some()) {
+        self.price_move_down_index.remove(
+            &PendingOrderKey {
+                price: price_move_down_index.destroy_some(),
+                tie_breaker: unique_priority_idx
+            }
+        );
+    };
+    if (time_based_index.is_some()) {
+        self.time_based_index.remove(&time_based_index.destroy_some());
+    };
+}
+
+ + + +
+ + + +## Function `place_pending_maker_order` + + + +
public(friend) fun place_pending_maker_order(self: &mut pending_order_book_index::PendingOrderBookIndex, order_id: order_book_types::OrderIdType, trigger_condition: order_book_types::TriggerCondition, unique_priority_idx: order_book_types::UniqueIdxType, is_buy: bool)
+
+ + + +
+Implementation + + +
public(friend) fun place_pending_maker_order(
+    self: &mut PendingOrderBookIndex,
+    order_id: OrderIdType,
+    trigger_condition: TriggerCondition,
+    unique_priority_idx: UniqueIdxType,
+    is_buy: bool
+) {
+    // Add this order to the pending order book index
+    let (price_move_down_index, price_move_up_index, time_based_index) =
+        trigger_condition.index(is_buy);
+
+    if (price_move_up_index.is_some()) {
+        self.price_move_up_index.add(
+            PendingOrderKey {
+                price: price_move_up_index.destroy_some(),
+                tie_breaker: unique_priority_idx
+            },
+            order_id
+        );
+    } else if (price_move_down_index.is_some()) {
+        self.price_move_down_index.add(
+            PendingOrderKey {
+                price: price_move_down_index.destroy_some(),
+                tie_breaker: unique_priority_idx
+            },
+            order_id
+        );
+    } else if (time_based_index.is_some()) {
+        self.time_based_index.add(time_based_index.destroy_some(), order_id);
+    };
+}
+
+ + + +
+ + + +## Function `take_ready_price_based_orders` + + + +
public fun take_ready_price_based_orders(self: &mut pending_order_book_index::PendingOrderBookIndex, current_price: u64): vector<order_book_types::OrderIdType>
+
+ + + +
+Implementation + + +
public fun take_ready_price_based_orders(
+    self: &mut PendingOrderBookIndex, current_price: u64
+): vector<OrderIdType> {
+    let orders = vector::empty();
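+    // Price-move-up orders are keyed by ascending trigger price; pop from the front while the current price has reached the trigger.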
+    while (!self.price_move_up_index.is_empty()) {
+        let (key, order_id) = self.price_move_up_index.borrow_front();
+        if (current_price >= key.price) {
+            orders.push_back(*order_id);
+            self.price_move_up_index.remove(&key);
+        } else {
+            break;
+        }
+    };
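+    // Price-move-down orders trigger once the price falls to or below the key; pop from the back (highest trigger price first).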
+    while (!self.price_move_down_index.is_empty()) {
+        let (key, order_id) = self.price_move_down_index.borrow_back();
+        if (current_price <= key.price) {
+            orders.push_back(*order_id);
+            self.price_move_down_index.remove(&key);
+        } else {
+            break;
+        }
+    };
+    orders
+}
+
+ + + +
+ + + +## Function `take_time_time_based_orders` + + + +
public fun take_time_time_based_orders(self: &mut pending_order_book_index::PendingOrderBookIndex): vector<order_book_types::OrderIdType>
+
+ + + +
+Implementation + + +
public fun take_time_time_based_orders(
+    self: &mut PendingOrderBookIndex
+): vector<OrderIdType> {
+    let orders = vector::empty();
+    while (!self.time_based_index.is_empty()) {
+        let current_time = timestamp::now_seconds();
+        let (time, order_id) = self.time_based_index.borrow_front();
+        if (current_time >= time) {
+            orders.push_back(*order_id);
+            self.time_based_index.remove(&time);
+        } else {
+            break;
+        }
+    };
+    orders
+}
+
+ + + +
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/aptos-experimental/doc/ristretto255_twisted_elgamal.md b/aptos-move/framework/aptos-experimental/doc/ristretto255_twisted_elgamal.md new file mode 100644 index 0000000000000..45308f0806763 --- /dev/null +++ b/aptos-move/framework/aptos-experimental/doc/ristretto255_twisted_elgamal.md @@ -0,0 +1,707 @@ + + + +# Module `0x7::ristretto255_twisted_elgamal` + +This module implements a Twisted ElGamal encryption API, over the Ristretto255 curve, designed to work with +additional cryptographic constructs such as Bulletproofs. + +A Twisted ElGamal *ciphertext* encrypts a value v under a basepoint G and a secondary point H, +alongside a public key Y = sk^(-1) * H, where sk is the corresponding secret key. The ciphertext is of the form: +(v * G + r * H, r * Y), where r is a random scalar. + +The Twisted ElGamal scheme differs from standard ElGamal by introducing a secondary point H to enhance +flexibility and functionality in cryptographic protocols. This design still maintains the homomorphic property: +Enc_Y(v, r) + Enc_Y(v', r') = Enc_Y(v + v', r + r'), where v, v' are plaintexts, Y is the public key, +and r, r' are random scalars. + + +- [Struct `Ciphertext`](#0x7_ristretto255_twisted_elgamal_Ciphertext) +- [Struct `CompressedCiphertext`](#0x7_ristretto255_twisted_elgamal_CompressedCiphertext) +- [Struct `CompressedPubkey`](#0x7_ristretto255_twisted_elgamal_CompressedPubkey) +- [Function `new_pubkey_from_bytes`](#0x7_ristretto255_twisted_elgamal_new_pubkey_from_bytes) +- [Function `pubkey_to_bytes`](#0x7_ristretto255_twisted_elgamal_pubkey_to_bytes) +- [Function `pubkey_to_point`](#0x7_ristretto255_twisted_elgamal_pubkey_to_point) +- [Function `pubkey_to_compressed_point`](#0x7_ristretto255_twisted_elgamal_pubkey_to_compressed_point) +- [Function `new_ciphertext_from_bytes`](#0x7_ristretto255_twisted_elgamal_new_ciphertext_from_bytes) +- [Function `new_ciphertext_no_randomness`](#0x7_ristretto255_twisted_elgamal_new_ciphertext_no_randomness) +- [Function `ciphertext_from_points`](#0x7_ristretto255_twisted_elgamal_ciphertext_from_points) +- [Function `ciphertext_from_compressed_points`](#0x7_ristretto255_twisted_elgamal_ciphertext_from_compressed_points) +- [Function `ciphertext_to_bytes`](#0x7_ristretto255_twisted_elgamal_ciphertext_to_bytes) +- [Function `ciphertext_into_points`](#0x7_ristretto255_twisted_elgamal_ciphertext_into_points) +- [Function `ciphertext_as_points`](#0x7_ristretto255_twisted_elgamal_ciphertext_as_points) +- [Function `compress_ciphertext`](#0x7_ristretto255_twisted_elgamal_compress_ciphertext) +- [Function `decompress_ciphertext`](#0x7_ristretto255_twisted_elgamal_decompress_ciphertext) +- [Function `ciphertext_add`](#0x7_ristretto255_twisted_elgamal_ciphertext_add) +- [Function `ciphertext_add_assign`](#0x7_ristretto255_twisted_elgamal_ciphertext_add_assign) +- [Function `ciphertext_sub`](#0x7_ristretto255_twisted_elgamal_ciphertext_sub) +- [Function `ciphertext_sub_assign`](#0x7_ristretto255_twisted_elgamal_ciphertext_sub_assign) +- [Function `ciphertext_clone`](#0x7_ristretto255_twisted_elgamal_ciphertext_clone) +- [Function `ciphertext_equals`](#0x7_ristretto255_twisted_elgamal_ciphertext_equals) +- [Function `get_value_component`](#0x7_ristretto255_twisted_elgamal_get_value_component) + + +
use 0x1::option;
+use 0x1::ristretto255;
+use 0x1::vector;
+
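+To make the homomorphic property from the module description concrete, below is a minimal
+test-only sketch. It is written as if it lived inside this module (so the `use` declarations
+above apply) and calls only functions documented further down plus the scalar constructor from
+`0x1::ristretto255`; it is an illustration, not part of the module.
+
+```
+#[test_only]
+fun homomorphism_sketch() {
+    // Enc(2) and Enc(3) with zero randomness are (2 * G, identity) and (3 * G, identity).
+    let ct_two = new_ciphertext_no_randomness(&ristretto255::new_scalar_from_u64(2));
+    let ct_three = new_ciphertext_no_randomness(&ristretto255::new_scalar_from_u64(3));
+    // Component-wise point addition gives an encryption of 5 with zero randomness.
+    let ct_sum = ciphertext_add(&ct_two, &ct_three);
+    let ct_five = new_ciphertext_no_randomness(&ristretto255::new_scalar_from_u64(5));
+    assert!(ciphertext_equals(&ct_sum, &ct_five), 0);
+}
+```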
+ + + + + +## Struct `Ciphertext` + +A Twisted ElGamal ciphertext, consisting of two Ristretto255 points. + + +
struct Ciphertext has drop
+
+ + + +
+Fields + + +
+
+left: ristretto255::RistrettoPoint +
+
+ +
+
+right: ristretto255::RistrettoPoint +
+
+ +
+
+ + +
+ + + +## Struct `CompressedCiphertext` + +A compressed Twisted ElGamal ciphertext, consisting of two compressed Ristretto255 points. + + +
struct CompressedCiphertext has copy, drop, store
+
+ + + +
+Fields + + +
+
+left: ristretto255::CompressedRistretto +
+
+ +
+
+right: ristretto255::CompressedRistretto +
+
+ +
+
+ + +
+ + + +## Struct `CompressedPubkey` + +A Twisted ElGamal public key, represented as a compressed Ristretto255 point. + + +
struct CompressedPubkey has copy, drop, store
+
+ + + +
+Fields + + +
+
+point: ristretto255::CompressedRistretto +
+
+ +
+
+ + +
+ + + +## Function `new_pubkey_from_bytes` + +Creates a new public key from a serialized Ristretto255 point. +Returns Some(CompressedPubkey) if the deserialization is successful, otherwise None. + + +
public fun new_pubkey_from_bytes(bytes: vector<u8>): option::Option<ristretto255_twisted_elgamal::CompressedPubkey>
+
+ + + +
+Implementation + + +
public fun new_pubkey_from_bytes(bytes: vector<u8>): Option<CompressedPubkey> {
+    let point = ristretto255::new_compressed_point_from_bytes(bytes);
+    if (point.is_some()) {
+        let pk = CompressedPubkey {
+            point: point.extract()
+        };
+        std::option::some(pk)
+    } else {
+        std::option::none()
+    }
+}
+
+ + + +
+ + + +## Function `pubkey_to_bytes` + +Serializes a Twisted ElGamal public key into its byte representation. + + +
public fun pubkey_to_bytes(pubkey: &ristretto255_twisted_elgamal::CompressedPubkey): vector<u8>
+
+ + + +
+Implementation + + +
public fun pubkey_to_bytes(pubkey: &CompressedPubkey): vector<u8> {
+    ristretto255::compressed_point_to_bytes(pubkey.point)
+}
+
+ + + +
+ + + +## Function `pubkey_to_point` + +Converts a public key into its corresponding RistrettoPoint. + + +
public fun pubkey_to_point(pubkey: &ristretto255_twisted_elgamal::CompressedPubkey): ristretto255::RistrettoPoint
+
+ + + +
+Implementation + + +
public fun pubkey_to_point(pubkey: &CompressedPubkey): RistrettoPoint {
+    ristretto255::point_decompress(&pubkey.point)
+}
+
+ + + +
+ + + +## Function `pubkey_to_compressed_point` + +Converts a public key into its corresponding CompressedRistretto representation. + + +
public fun pubkey_to_compressed_point(pubkey: &ristretto255_twisted_elgamal::CompressedPubkey): ristretto255::CompressedRistretto
+
+ + + +
+Implementation + + +
public fun pubkey_to_compressed_point(pubkey: &CompressedPubkey): CompressedRistretto {
+    pubkey.point
+}
+
+ + + +
+ + + +## Function `new_ciphertext_from_bytes` + +Creates a new ciphertext from a serialized representation, consisting of two 32-byte Ristretto255 points. +Returns Some(Ciphertext) if the deserialization succeeds, otherwise None. + + +
public fun new_ciphertext_from_bytes(bytes: vector<u8>): option::Option<ristretto255_twisted_elgamal::Ciphertext>
+
+ + + +
+Implementation + + +
public fun new_ciphertext_from_bytes(bytes: vector<u8>): Option<Ciphertext> {
+    if (bytes.length() != 64) {
+        return std::option::none()
+    };
+
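+    // trim(32) splits the 64-byte input: `bytes` keeps the first 32 bytes (left point) and
+    // `bytes_right` receives the remaining 32 bytes (right point).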
+    let bytes_right = bytes.trim(32);
+
+    let left_point = ristretto255::new_point_from_bytes(bytes);
+    let right_point = ristretto255::new_point_from_bytes(bytes_right);
+
+    if (left_point.is_some() && right_point.is_some()) {
+        std::option::some(Ciphertext {
+            left: left_point.extract(),
+            right: right_point.extract()
+        })
+    } else {
+        std::option::none()
+    }
+}
+
+ + + +
+ + + +## Function `new_ciphertext_no_randomness` + +Creates a ciphertext (val * G, 0 * G) where val is the plaintext, and the randomness is set to zero. + + +
public fun new_ciphertext_no_randomness(val: &ristretto255::Scalar): ristretto255_twisted_elgamal::Ciphertext
+
+ + + +
+Implementation + + +
public fun new_ciphertext_no_randomness(val: &Scalar): Ciphertext {
+    Ciphertext {
+        left: ristretto255::basepoint_mul(val),
+        right: ristretto255::point_identity(),
+    }
+}
+
+ + + +
+ + + +## Function `ciphertext_from_points` + +Constructs a Twisted ElGamal ciphertext from two RistrettoPoints. + + +
public fun ciphertext_from_points(left: ristretto255::RistrettoPoint, right: ristretto255::RistrettoPoint): ristretto255_twisted_elgamal::Ciphertext
+
+ + + +
+Implementation + + +
public fun ciphertext_from_points(left: RistrettoPoint, right: RistrettoPoint): Ciphertext {
+    Ciphertext {
+        left,
+        right,
+    }
+}
+
+ + + +
+ + + +## Function `ciphertext_from_compressed_points` + +Constructs a Twisted ElGamal ciphertext from two compressed Ristretto255 points. + + +
public fun ciphertext_from_compressed_points(left: ristretto255::CompressedRistretto, right: ristretto255::CompressedRistretto): ristretto255_twisted_elgamal::CompressedCiphertext
+
+ + + +
+Implementation + + +
public fun ciphertext_from_compressed_points(
+    left: CompressedRistretto,
+    right: CompressedRistretto
+): CompressedCiphertext {
+    CompressedCiphertext {
+        left,
+        right,
+    }
+}
+
+ + + +
+ + + +## Function `ciphertext_to_bytes` + +Serializes a Twisted ElGamal ciphertext into its byte representation. + + +
public fun ciphertext_to_bytes(ct: &ristretto255_twisted_elgamal::Ciphertext): vector<u8>
+
+ + + +
+Implementation + + +
public fun ciphertext_to_bytes(ct: &Ciphertext): vector<u8> {
+    let bytes = ristretto255::point_to_bytes(&ristretto255::point_compress(&ct.left));
+    bytes.append(ristretto255::point_to_bytes(&ristretto255::point_compress(&ct.right)));
+    bytes
+}
+
+ + + +
+ + + +## Function `ciphertext_into_points` + +Converts a ciphertext into a pair of RistrettoPoints. + + +
public fun ciphertext_into_points(c: ristretto255_twisted_elgamal::Ciphertext): (ristretto255::RistrettoPoint, ristretto255::RistrettoPoint)
+
+ + + +
+Implementation + + +
public fun ciphertext_into_points(c: Ciphertext): (RistrettoPoint, RistrettoPoint) {
+    let Ciphertext { left, right } = c;
+    (left, right)
+}
+
+ + + +
+ + + +## Function `ciphertext_as_points` + +Returns the two RistrettoPoints representing the ciphertext. + + +
public fun ciphertext_as_points(c: &ristretto255_twisted_elgamal::Ciphertext): (&ristretto255::RistrettoPoint, &ristretto255::RistrettoPoint)
+
+ + + +
+Implementation + + +
public fun ciphertext_as_points(c: &Ciphertext): (&RistrettoPoint, &RistrettoPoint) {
+    (&c.left, &c.right)
+}
+
+ + + +
+ + + +## Function `compress_ciphertext` + +Compresses a Twisted ElGamal ciphertext into its CompressedCiphertext representation. + + +
public fun compress_ciphertext(ct: &ristretto255_twisted_elgamal::Ciphertext): ristretto255_twisted_elgamal::CompressedCiphertext
+
+ + + +
+Implementation + + +
public fun compress_ciphertext(ct: &Ciphertext): CompressedCiphertext {
+    CompressedCiphertext {
+        left: ristretto255::point_compress(&ct.left),
+        right: ristretto255::point_compress(&ct.right),
+    }
+}
+
+ + + +
+ + + +## Function `decompress_ciphertext` + +Decompresses a CompressedCiphertext back into its Ciphertext representation. + + +
public fun decompress_ciphertext(ct: &ristretto255_twisted_elgamal::CompressedCiphertext): ristretto255_twisted_elgamal::Ciphertext
+
+ + + +
+Implementation + + +
public fun decompress_ciphertext(ct: &CompressedCiphertext): Ciphertext {
+    Ciphertext {
+        left: ristretto255::point_decompress(&ct.left),
+        right: ristretto255::point_decompress(&ct.right),
+    }
+}
+
+ + + +
+ + + +## Function `ciphertext_add` + +Adds two ciphertexts homomorphically, producing a new ciphertext representing the sum of the two. + + +
public fun ciphertext_add(lhs: &ristretto255_twisted_elgamal::Ciphertext, rhs: &ristretto255_twisted_elgamal::Ciphertext): ristretto255_twisted_elgamal::Ciphertext
+
+ + + +
+Implementation + + +
public fun ciphertext_add(lhs: &Ciphertext, rhs: &Ciphertext): Ciphertext {
+    Ciphertext {
+        left: ristretto255::point_add(&lhs.left, &rhs.left),
+        right: ristretto255::point_add(&lhs.right, &rhs.right),
+    }
+}
+
+ + + +
+ + + +## Function `ciphertext_add_assign` + +Adds two ciphertexts homomorphically, updating the first ciphertext in place. + + +
public fun ciphertext_add_assign(lhs: &mut ristretto255_twisted_elgamal::Ciphertext, rhs: &ristretto255_twisted_elgamal::Ciphertext)
+
+ + + +
+Implementation + + +
public fun ciphertext_add_assign(lhs: &mut Ciphertext, rhs: &Ciphertext) {
+    ristretto255::point_add_assign(&mut lhs.left, &rhs.left);
+    ristretto255::point_add_assign(&mut lhs.right, &rhs.right);
+}
+
+ + + +
+ + + +## Function `ciphertext_sub` + +Subtracts one ciphertext from another homomorphically, producing a new ciphertext representing the difference. + + +
public fun ciphertext_sub(lhs: &ristretto255_twisted_elgamal::Ciphertext, rhs: &ristretto255_twisted_elgamal::Ciphertext): ristretto255_twisted_elgamal::Ciphertext
+
+ + + +
+Implementation + + +
public fun ciphertext_sub(lhs: &Ciphertext, rhs: &Ciphertext): Ciphertext {
+    Ciphertext {
+        left: ristretto255::point_sub(&lhs.left, &rhs.left),
+        right: ristretto255::point_sub(&lhs.right, &rhs.right),
+    }
+}
+
+ + + +
+ + + +## Function `ciphertext_sub_assign` + +Subtracts one ciphertext from another homomorphically, updating the first ciphertext in place. + + +
public fun ciphertext_sub_assign(lhs: &mut ristretto255_twisted_elgamal::Ciphertext, rhs: &ristretto255_twisted_elgamal::Ciphertext)
+
+ + + +
+Implementation + + +
public fun ciphertext_sub_assign(lhs: &mut Ciphertext, rhs: &Ciphertext) {
+    ristretto255::point_sub_assign(&mut lhs.left, &rhs.left);
+    ristretto255::point_sub_assign(&mut lhs.right, &rhs.right);
+}
+
+ + + +
+ + + +## Function `ciphertext_clone` + +Creates a copy of the provided ciphertext. + + +
public fun ciphertext_clone(c: &ristretto255_twisted_elgamal::Ciphertext): ristretto255_twisted_elgamal::Ciphertext
+
+ + + +
+Implementation + + +
public fun ciphertext_clone(c: &Ciphertext): Ciphertext {
+    Ciphertext {
+        left: ristretto255::point_clone(&c.left),
+        right: ristretto255::point_clone(&c.right),
+    }
+}
+
+ + + +
+ + + +## Function `ciphertext_equals` + +Compares two ciphertexts for equality, returning true if they encrypt the same value and randomness. + + +
public fun ciphertext_equals(lhs: &ristretto255_twisted_elgamal::Ciphertext, rhs: &ristretto255_twisted_elgamal::Ciphertext): bool
+
+ + + +
+Implementation + + +
public fun ciphertext_equals(lhs: &Ciphertext, rhs: &Ciphertext): bool {
+    ristretto255::point_equals(&lhs.left, &rhs.left) &&
+        ristretto255::point_equals(&lhs.right, &rhs.right)
+}
+
+ + + +
+ + + +## Function `get_value_component` + +Returns the RistrettoPoint in the ciphertext that contains the encrypted value in the exponent. + + +
public fun get_value_component(ct: &ristretto255_twisted_elgamal::Ciphertext): &ristretto255::RistrettoPoint
+
+ + + +
+Implementation + + +
public fun get_value_component(ct: &Ciphertext): &RistrettoPoint {
+    &ct.left
+}
+
+ + + +
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/aptos-experimental/doc/sigma_protos.md b/aptos-move/framework/aptos-experimental/doc/sigma_protos.md new file mode 100644 index 0000000000000..7cadbdfcd44e2 --- /dev/null +++ b/aptos-move/framework/aptos-experimental/doc/sigma_protos.md @@ -0,0 +1,817 @@ + + + +# Module `0x7::sigma_protos` + +Package for creating, verifying, serializing & deserializing the $\Sigma$-protocol proofs used in veiled coins. + + + + +### Preliminaries + + +Recall that a $\Sigma$-protocol proof argues knowledge of a *secret* witness $w$ such that an arithmetic relation +$R(x; w) = 1$ is satisfied over group and field elements stored in $x$ and $w$. + +Here, $x$ is a public statement known to the verifier (i.e., known to the validators). Importantly, the +$\Sigma$-protocol's zero-knowledge property ensures the witness $w$ remains secret. + + + + +### WithdrawalSubproof: ElGamal-Pedersen equality + + +This proof is used to provably convert an ElGamal ciphertext to a Pedersen commitment over which a ZK range proof +can be securely computed. Otherwise, knowledge of the ElGamal SK breaks the binding of the 2nd component of the +ElGamal ciphertext, making any ZK range proof over it useless. +Because the sender cannot, after receiving a fully veiled transaction, compute their balance randomness, their +updated balance ciphertext is computed in the relation, which is then linked to the Pedersen commitment of $b$. + +The secret witness $w$ in this relation, known only to the sender of the TXN, consists of: +- $b$, sender's new balance, after the withdrawal from their veiled balance +- $r$, randomness used to commit to $b$ +- $sk$, the sender's secret ElGamal encryption key + +(Note that the $\Sigma$-protocol's zero-knowledge property ensures the witness is not revealed.) + +The public statement $x$ in this relation consists of: +- $G$, basepoint of a given elliptic curve +- $H$, basepoint used for randomness in the Pedersen commitments +- $(C_1, C_2)$, ElGamal encryption of the sender's current balance +- $c$, Pedersen commitment to $b$ with randomness $r$ +- $v$, the amount the sender is withdrawing +- $Y$, the sender's ElGamal encryption public key + +The relation being proved is as follows: + +``` +R( +x = [ (C_1, C_2), c, G, H, Y, v] +w = [ b, r, sk ] +) = { +C_1 - v G = b G + sk C_2 +c = b G + r H +Y = sk G +} +``` + + + + +### TransferSubproof: ElGamal-Pedersen equality and ElGamal-ElGamal equality + + +This protocol argues two things. First, that the same amount is ElGamal-encrypted for both the sender and recipient. +This is needed to correctly withdraw & deposit the same amount during a transfer. Second, that this same amount is +committed via Pedersen. Third, that a Pedersen-committed balance is correctly ElGamal encrypted. ZK range proofs +are computed over these last two Pedersen commitments, to prevent overflowing attacks on the balance. 
+ +The secret witness $w$ in this relation, known only to the sender of the TXN, consists of: +- $v$, amount being transferred +- $r$, randomness used to ElGamal-encrypt $v$ +- $b$, sender's new balance after the transfer occurs +- $r_b$, randomness used to Pedersen commit $b$ +- $sk$, the sender's secret ElGamal encryption key + +The public statement $x$ in this relation consists of: +- Public parameters ++ $G$, basepoint of a given elliptic curve ++ $H$, basepoint used for randomness in the Pedersen commitments +- PKs ++ $Y$, sender's PK ++ $Y'$, recipient's PK +- Amount encryption & commitment ++ $(C, D)$, ElGamal encryption of $v$, under the sender's PK, using randomness $r$ ++ $(C', D)$, ElGamal encryption of $v$, under the recipient's PK, using randomness $r$ ++ $c$, Pedersen commitment to $v$ using randomness $r$ +- New balance encryption & commitment ++ $(C_1, C_2)$, ElGamal encryption of the sender's *current* balance, under the sender's PK. This is used to +compute the sender's updated balance in the relation, as the sender cannot know their balance randomness. ++ $c'$, Pedersen commitment to $b$ using randomness $r_b$ + +The relation being proved is: +``` +R( +x = [ Y, Y', (C, C', D), c, (C_1, C_2), c', G, H ] +w = [ v, r, b, r_b, sk ] +) = { +C = v G + r Y +C' = v G + r Y' +D = r G +C_1 - C = b G + sk (C_2 - D) +c = v G + r H +c' = b G + r_b H +Y = sk G +} +``` + +A relation similar to this is also described on page 14 of the Zether paper [BAZB20] (just replace $G$ -> $g$, +$C'$ -> $\bar{C}$, $Y$ -> $y$, $Y'$ -> $\bar{y}$, $v$ -> $b^*$). Note that their relation does not include the +ElGamal-to-Pedersen conversion parts, as they can do ZK range proofs directly over ElGamal ciphertexts using their +$\Sigma$-bullets modification of Bulletproofs. + + + - [Preliminaries](#@Preliminaries_0) + - [WithdrawalSubproof: ElGamal-Pedersen equality](#@WithdrawalSubproof:_ElGamal-Pedersen_equality_1) + - [TransferSubproof: ElGamal-Pedersen equality and ElGamal-ElGamal equality](#@TransferSubproof:_ElGamal-Pedersen_equality_and_ElGamal-ElGamal_equality_2) +- [Struct `WithdrawalSubproof`](#0x7_sigma_protos_WithdrawalSubproof) +- [Struct `TransferSubproof`](#0x7_sigma_protos_TransferSubproof) +- [Constants](#@Constants_3) +- [Function `verify_transfer_subproof`](#0x7_sigma_protos_verify_transfer_subproof) +- [Function `verify_withdrawal_subproof`](#0x7_sigma_protos_verify_withdrawal_subproof) +- [Function `deserialize_withdrawal_subproof`](#0x7_sigma_protos_deserialize_withdrawal_subproof) +- [Function `deserialize_transfer_subproof`](#0x7_sigma_protos_deserialize_transfer_subproof) +- [Function `fiat_shamir_withdrawal_subproof_challenge`](#0x7_sigma_protos_fiat_shamir_withdrawal_subproof_challenge) +- [Function `fiat_shamir_transfer_subproof_challenge`](#0x7_sigma_protos_fiat_shamir_transfer_subproof_challenge) + + +
use 0x1::error;
+use 0x1::option;
+use 0x1::ristretto255;
+use 0x1::ristretto255_elgamal;
+use 0x1::ristretto255_pedersen;
+use 0x1::vector;
+use 0x7::helpers;
+
+ + + + + +## Struct `WithdrawalSubproof` + +A $\Sigma$-protocol used during an unveiled withdrawal (for proving the correct ElGamal encryption of a +Pedersen-committed balance). + + +
struct WithdrawalSubproof has drop
+
+ + + +
+Fields + + +
+
+x1: ristretto255::RistrettoPoint +
+
+ +
+
+x2: ristretto255::RistrettoPoint +
+
+ +
+
+x3: ristretto255::RistrettoPoint +
+
+ +
+
+alpha1: ristretto255::Scalar +
+
+ +
+
+alpha2: ristretto255::Scalar +
+
+ +
+
+alpha3: ristretto255::Scalar +
+
+ +
+
+ + +
+ + + +## Struct `TransferSubproof` + +A $\Sigma$-protocol proof used during a veiled transfer. This proof encompasses the $\Sigma$-protocol from +WithdrawalSubproof. + + +
struct TransferSubproof has drop
+
+ + + +
+Fields + + +
+
+x1: ristretto255::RistrettoPoint +
+
+ +
+
+x2: ristretto255::RistrettoPoint +
+
+ +
+
+x3: ristretto255::RistrettoPoint +
+
+ +
+
+x4: ristretto255::RistrettoPoint +
+
+ +
+
+x5: ristretto255::RistrettoPoint +
+
+ +
+
+x6: ristretto255::RistrettoPoint +
+
+ +
+
+x7: ristretto255::RistrettoPoint +
+
+ +
+
+alpha1: ristretto255::Scalar +
+
+ +
+
+alpha2: ristretto255::Scalar +
+
+ +
+
+alpha3: ristretto255::Scalar +
+
+ +
+
+alpha4: ristretto255::Scalar +
+
+ +
+
+alpha5: ristretto255::Scalar +
+
+ +
+
+ + +
+ + + +## Constants + + + + +The $\Sigma$-protocol proof for withdrawals did not verify. + + +
const ESIGMA_PROTOCOL_VERIFY_FAILED: u64 = 1;
+
+ + + + + +The domain separation tag (DST) used in the Fiat-Shamir transform of our $\Sigma$-protocol. + + +
const FIAT_SHAMIR_SIGMA_DST: vector<u8> = [65, 112, 116, 111, 115, 86, 101, 105, 108, 101, 100, 67, 111, 105, 110, 47, 87, 105, 116, 104, 100, 114, 97, 119, 97, 108, 83, 117, 98, 112, 114, 111, 111, 102, 70, 105, 97, 116, 83, 104, 97, 109, 105, 114];
+
+ + + + + +## Function `verify_transfer_subproof` + +Verifies a $\Sigma$-protocol proof necessary to ensure correctness of a veiled transfer. + +Specifically, the proof argues that the same amount $v$ is Pedersen-committed in comm_amount and ElGamal- +encrypted in withdraw_ct (under sender_pk) and in deposit_ct (under recipient_pk), all three using the +same randomness $r$. + +In addition, it argues that the sender's new balance $b$ committed to by sender_new_balance_comm is the same +as the value encrypted by the ciphertext obtained by subtracting withdraw_ct from sender_curr_balance_ct + + +
public fun verify_transfer_subproof(sender_pk: &ristretto255_elgamal::CompressedPubkey, recipient_pk: &ristretto255_elgamal::CompressedPubkey, withdraw_ct: &ristretto255_elgamal::Ciphertext, deposit_ct: &ristretto255_elgamal::Ciphertext, comm_amount: &ristretto255_pedersen::Commitment, sender_new_balance_comm: &ristretto255_pedersen::Commitment, sender_curr_balance_ct: &ristretto255_elgamal::Ciphertext, proof: &sigma_protos::TransferSubproof)
+
+ + + +
+Implementation + + +
public fun verify_transfer_subproof(
+    sender_pk: &elgamal::CompressedPubkey,
+    recipient_pk: &elgamal::CompressedPubkey,
+    withdraw_ct: &elgamal::Ciphertext,
+    deposit_ct: &elgamal::Ciphertext,
+    comm_amount: &pedersen::Commitment,
+    sender_new_balance_comm: &pedersen::Commitment,
+    sender_curr_balance_ct: &elgamal::Ciphertext,
+    proof: &TransferSubproof)
+{
+    let h = pedersen::randomness_base_for_bulletproof();
+    let sender_pk_point = elgamal::pubkey_to_point(sender_pk);
+    let recipient_pk_point = elgamal::pubkey_to_point(recipient_pk);
+    let (big_c, big_d) = elgamal::ciphertext_as_points(withdraw_ct);
+    let (bar_big_c, _) = elgamal::ciphertext_as_points(deposit_ct);
+    let c = pedersen::commitment_as_point(comm_amount);
+    let (c1, c2) = elgamal::ciphertext_as_points(sender_curr_balance_ct);
+    let bar_c = pedersen::commitment_as_point(sender_new_balance_comm);
+
+    // TODO: Can be optimized so we don't re-serialize the proof for Fiat-Shamir
+    let rho = fiat_shamir_transfer_subproof_challenge(
+        sender_pk, recipient_pk,
+        withdraw_ct, deposit_ct, comm_amount,
+        sender_curr_balance_ct, sender_new_balance_comm,
+        &proof.x1, &proof.x2, &proof.x3, &proof.x4,
+        &proof.x5, &proof.x6, &proof.x7);
+
+    let g_alpha2 = ristretto255::basepoint_mul(&proof.alpha2);
+    // \rho * D + X1 =? \alpha_2 * g
+    let d_acc = ristretto255::point_mul(big_d, &rho);
+    ristretto255::point_add_assign(&mut d_acc, &proof.x1);
+    assert!(ristretto255::point_equals(&d_acc, &g_alpha2), error::invalid_argument(ESIGMA_PROTOCOL_VERIFY_FAILED));
+
+    let g_alpha1 = ristretto255::basepoint_mul(&proof.alpha1);
+    // \rho * C + X2 =? \alpha_1 * g + \alpha_2 * y
+    let big_c_acc = ristretto255::point_mul(big_c, &rho);
+    ristretto255::point_add_assign(&mut big_c_acc, &proof.x2);
+    let y_alpha2 = ristretto255::point_mul(&sender_pk_point, &proof.alpha2);
+    ristretto255::point_add_assign(&mut y_alpha2, &g_alpha1);
+    assert!(ristretto255::point_equals(&big_c_acc, &y_alpha2), error::invalid_argument(ESIGMA_PROTOCOL_VERIFY_FAILED));
+
+    // \rho * \bar{C} + X3 =? \alpha_1 * g + \alpha_2 * \bar{y}
+    let big_bar_c_acc = ristretto255::point_mul(bar_big_c, &rho);
+    ristretto255::point_add_assign(&mut big_bar_c_acc, &proof.x3);
+    let y_bar_alpha2 = ristretto255::point_mul(&recipient_pk_point, &proof.alpha2);
+    ristretto255::point_add_assign(&mut y_bar_alpha2, &g_alpha1);
+    assert!(ristretto255::point_equals(&big_bar_c_acc, &y_bar_alpha2), error::invalid_argument(ESIGMA_PROTOCOL_VERIFY_FAILED));
+
+    let g_alpha3 = ristretto255::basepoint_mul(&proof.alpha3);
+    // \rho * (C_1 - C) + X_4 =? \alpha_3 * g + \alpha_5 * (C_2 - D)
+    let big_c1_acc = ristretto255::point_sub(c1, big_c);
+    ristretto255::point_mul_assign(&mut big_c1_acc, &rho);
+    ristretto255::point_add_assign(&mut big_c1_acc, &proof.x4);
+
+    let big_c2_acc = ristretto255::point_sub(c2, big_d);
+    ristretto255::point_mul_assign(&mut big_c2_acc, &proof.alpha5);
+    ristretto255::point_add_assign(&mut big_c2_acc, &g_alpha3);
+    assert!(ristretto255::point_equals(&big_c1_acc, &big_c2_acc), error::invalid_argument(ESIGMA_PROTOCOL_VERIFY_FAILED));
+
+    // \rho * c + X_5 =? \alpha_1 * g + \alpha_2 * h
+    let c_acc = ristretto255::point_mul(c, &rho);
+    ristretto255::point_add_assign(&mut c_acc, &proof.x5);
+
+    let h_alpha2_acc = ristretto255::point_mul(&h, &proof.alpha2);
+    ristretto255::point_add_assign(&mut h_alpha2_acc, &g_alpha1);
+    assert!(ristretto255::point_equals(&c_acc, &h_alpha2_acc), error::invalid_argument(ESIGMA_PROTOCOL_VERIFY_FAILED));
+
+    // \rho * \bar{c} + X_6 =? \alpha_3 * g + \alpha_4 * h
+    let bar_c_acc = ristretto255::point_mul(bar_c, &rho);
+    ristretto255::point_add_assign(&mut bar_c_acc, &proof.x6);
+
+    let h_alpha4_acc = ristretto255::point_mul(&h, &proof.alpha4);
+    ristretto255::point_add_assign(&mut h_alpha4_acc, &g_alpha3);
+    assert!(ristretto255::point_equals(&bar_c_acc, &h_alpha4_acc), error::invalid_argument(ESIGMA_PROTOCOL_VERIFY_FAILED));
+
+    // \rho * Y + X_7 =? \alpha_5 * G
+    let y_acc = ristretto255::point_mul(&sender_pk_point, &rho);
+    ristretto255::point_add_assign(&mut y_acc, &proof.x7);
+
+    let g_alpha5 = ristretto255::basepoint_mul(&proof.alpha5);
+    assert!(ristretto255::point_equals(&y_acc, &g_alpha5), error::invalid_argument(ESIGMA_PROTOCOL_VERIFY_FAILED));
+}
+
+ + + +
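+For intuition, the seven checks above are the verification side of a standard Schnorr-style $\Sigma$-protocol for the relation documented at the top of this module. A sketch of the corresponding (off-chain) prover follows; the blinder names $k_v, k_r, k_b, k_{r_b}, k_{sk}$ are illustrative and not part of this module.
+
+```
+Sample random scalars k_v, k_r, k_b, k_rb, k_sk and commit:
+X_1 = k_r G
+X_2 = k_v G + k_r Y
+X_3 = k_v G + k_r Y'
+X_4 = k_b G + k_sk (C_2 - D)
+X_5 = k_v G + k_r H
+X_6 = k_b G + k_rb H
+X_7 = k_sk G
+
+Derive rho via Fiat-Shamir (as in fiat_shamir_transfer_subproof_challenge) and respond:
+alpha_1 = k_v + rho v
+alpha_2 = k_r + rho r
+alpha_3 = k_b + rho b
+alpha_4 = k_rb + rho r_b
+alpha_5 = k_sk + rho sk
+```
+
+For example, the first check passes because $\alpha_2 G = (k_r + \rho r) G = X_1 + \rho D$.
+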
+ + + +## Function `verify_withdrawal_subproof` + +Verifies the $\Sigma$-protocol proof necessary to ensure correctness of a veiled-to-unveiled transfer. + +Specifically, the proof argues that the same amount $v$ is Pedersen-committed in sender_new_balance_comm and +ElGamal-encrypted in the ciphertext obtained by subtracting the ciphertext (vG, 0G) from sender_curr_balance_ct + + +
public fun verify_withdrawal_subproof(sender_pk: &ristretto255_elgamal::CompressedPubkey, sender_curr_balance_ct: &ristretto255_elgamal::Ciphertext, sender_new_balance_comm: &ristretto255_pedersen::Commitment, amount: &ristretto255::Scalar, proof: &sigma_protos::WithdrawalSubproof)
+
+ + + +
+Implementation + + +
public fun verify_withdrawal_subproof(
+    sender_pk: &elgamal::CompressedPubkey,
+    sender_curr_balance_ct: &elgamal::Ciphertext,
+    sender_new_balance_comm: &pedersen::Commitment,
+    amount: &Scalar,
+    proof: &WithdrawalSubproof)
+{
+    let h = pedersen::randomness_base_for_bulletproof();
+    let (big_c1, big_c2) = elgamal::ciphertext_as_points(sender_curr_balance_ct);
+    let c = pedersen::commitment_as_point(sender_new_balance_comm);
+    let sender_pk_point = elgamal::pubkey_to_point(sender_pk);
+
+    let rho = fiat_shamir_withdrawal_subproof_challenge(
+        sender_pk,
+        sender_curr_balance_ct,
+        sender_new_balance_comm,
+        amount,
+        &proof.x1,
+        &proof.x2,
+        &proof.x3);
+
+    let g_alpha1 = ristretto255::basepoint_mul(&proof.alpha1);
+    // \rho * (C_1 - v * g) + X_1 =? \alpha_1 * g + \alpha_3 * C_2
+    let gv = ristretto255::basepoint_mul(amount);
+    let big_c1_acc = ristretto255::point_sub(big_c1, &gv);
+    ristretto255::point_mul_assign(&mut big_c1_acc, &rho);
+    ristretto255::point_add_assign(&mut big_c1_acc, &proof.x1);
+
+    let big_c2_acc = ristretto255::point_mul(big_c2, &proof.alpha3);
+    ristretto255::point_add_assign(&mut big_c2_acc, &g_alpha1);
+    assert!(ristretto255::point_equals(&big_c1_acc, &big_c2_acc), error::invalid_argument(ESIGMA_PROTOCOL_VERIFY_FAILED));
+
+    // \rho * c + X_2 =? \alpha_1 * g + \alpha_2 * h
+    let c_acc = ristretto255::point_mul(c, &rho);
+    ristretto255::point_add_assign(&mut c_acc, &proof.x2);
+
+    let h_alpha2_acc = ristretto255::point_mul(&h, &proof.alpha2);
+    ristretto255::point_add_assign(&mut h_alpha2_acc, &g_alpha1);
+    assert!(ristretto255::point_equals(&c_acc, &h_alpha2_acc), error::invalid_argument(ESIGMA_PROTOCOL_VERIFY_FAILED));
+
+    // \rho * Y + X_3 =? \alpha_3 * g
+    let y_acc = ristretto255::point_mul(&sender_pk_point, &rho);
+    ristretto255::point_add_assign(&mut y_acc, &proof.x3);
+
+    let g_alpha3 = ristretto255::basepoint_mul(&proof.alpha3);
+    assert!(ristretto255::point_equals(&y_acc, &g_alpha3), error::invalid_argument(ESIGMA_PROTOCOL_VERIFY_FAILED));
+}
+
+ + + +
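+The (off-chain) prover mirrors the transfer case. Writing $b$ for the new balance committed in sender_new_balance_comm, $r_b$ for its Pedersen randomness and $sk$ for the sender's secret key (the blinder names below are illustrative and not part of this module), it would compute:
+
+```
+Sample random scalars k_b, k_rb, k_sk and commit:
+X_1 = k_b G + k_sk C_2
+X_2 = k_b G + k_rb H
+X_3 = k_sk G
+
+Derive rho via Fiat-Shamir and respond:
+alpha_1 = k_b + rho b
+alpha_2 = k_rb + rho r_b
+alpha_3 = k_sk + rho sk
+```
+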
+ + + +## Function `deserialize_withdrawal_subproof` + +Deserializes and returns a WithdrawalSubproof given its byte representation. + +
public fun deserialize_withdrawal_subproof(proof_bytes: vector<u8>): option::Option<sigma_protos::WithdrawalSubproof>
+
+ + + +
+Implementation + + +
public fun deserialize_withdrawal_subproof(proof_bytes: vector<u8>): Option<WithdrawalSubproof> {
+    if (proof_bytes.length::<u8>() != 192) {
+        return std::option::none<WithdrawalSubproof>()
+    };
+
+    let x1_bytes = cut_vector<u8>(&mut proof_bytes, 32);
+    let x1 = ristretto255::new_point_from_bytes(x1_bytes);
+    if (!x1.is_some::<RistrettoPoint>()) {
+        return std::option::none<WithdrawalSubproof>()
+    };
+    let x1 = x1.extract::<RistrettoPoint>();
+
+    let x2_bytes = cut_vector<u8>(&mut proof_bytes, 32);
+    let x2 = ristretto255::new_point_from_bytes(x2_bytes);
+    if (!x2.is_some::<RistrettoPoint>()) {
+        return std::option::none<WithdrawalSubproof>()
+    };
+    let x2 = x2.extract::<RistrettoPoint>();
+
+    let x3_bytes = cut_vector<u8>(&mut proof_bytes, 32);
+    let x3 = ristretto255::new_point_from_bytes(x3_bytes);
+    if (!x3.is_some::<RistrettoPoint>()) {
+        return std::option::none<WithdrawalSubproof>()
+    };
+    let x3 = x3.extract::<RistrettoPoint>();
+
+    let alpha1_bytes = cut_vector<u8>(&mut proof_bytes, 32);
+    let alpha1 = ristretto255::new_scalar_from_bytes(alpha1_bytes);
+    if (!alpha1.is_some()) {
+        return std::option::none<WithdrawalSubproof>()
+    };
+    let alpha1 = alpha1.extract();
+
+    let alpha2_bytes = cut_vector<u8>(&mut proof_bytes, 32);
+    let alpha2 = ristretto255::new_scalar_from_bytes(alpha2_bytes);
+    if (!alpha2.is_some()) {
+        return std::option::none<WithdrawalSubproof>()
+    };
+    let alpha2 = alpha2.extract();
+
+    let alpha3_bytes = cut_vector<u8>(&mut proof_bytes, 32);
+    let alpha3 = ristretto255::new_scalar_from_bytes(alpha3_bytes);
+    if (!alpha3.is_some()) {
+        return std::option::none<WithdrawalSubproof>()
+    };
+    let alpha3 = alpha3.extract();
+
+    std::option::some(WithdrawalSubproof {
+        x1, x2, x3, alpha1, alpha2, alpha3
+    })
+}
+
+ + + +
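+Both deserializers expect a fixed-length concatenation of 32-byte encodings: 3 points followed by 3 scalars (192 bytes) here, and 7 points followed by 5 scalars (384 bytes) for TransferSubproof; any malformed input yields none. A hypothetical in-module unit test exercising just the length check could look like this (illustrative only):
+
+```
+#[test]
+fun deserialize_rejects_wrong_length() {
+    // 191 bytes instead of the required 192, so deserialization must return `none`.
+    let bytes = vector::empty<u8>();
+    let i = 0;
+    while (i < 191) {
+        bytes.push_back(0u8);
+        i = i + 1;
+    };
+    assert!(deserialize_withdrawal_subproof(bytes).is_none(), 0);
+}
+```
+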
+ + + +## Function `deserialize_transfer_subproof` + +Deserializes and returns a TransferSubproof given its byte representation. + + +
public fun deserialize_transfer_subproof(proof_bytes: vector<u8>): option::Option<sigma_protos::TransferSubproof>
+
+ + + +
+Implementation + + +
public fun deserialize_transfer_subproof(proof_bytes: vector<u8>): Option<TransferSubproof> {
+    if (proof_bytes.length::<u8>() != 384) {
+        return std::option::none<TransferSubproof>()
+    };
+
+    let x1_bytes = cut_vector<u8>(&mut proof_bytes, 32);
+    let x1 = ristretto255::new_point_from_bytes(x1_bytes);
+    if (!x1.is_some::<RistrettoPoint>()) {
+        return std::option::none<TransferSubproof>()
+    };
+    let x1 = x1.extract::<RistrettoPoint>();
+
+    let x2_bytes = cut_vector<u8>(&mut proof_bytes, 32);
+    let x2 = ristretto255::new_point_from_bytes(x2_bytes);
+    if (!x2.is_some::<RistrettoPoint>()) {
+        return std::option::none<TransferSubproof>()
+    };
+    let x2 = x2.extract::<RistrettoPoint>();
+
+    let x3_bytes = cut_vector<u8>(&mut proof_bytes, 32);
+    let x3 = ristretto255::new_point_from_bytes(x3_bytes);
+    if (!x3.is_some::<RistrettoPoint>()) {
+        return std::option::none<TransferSubproof>()
+    };
+    let x3 = x3.extract::<RistrettoPoint>();
+
+    let x4_bytes = cut_vector<u8>(&mut proof_bytes, 32);
+    let x4 = ristretto255::new_point_from_bytes(x4_bytes);
+    if (!x4.is_some::<RistrettoPoint>()) {
+        return std::option::none<TransferSubproof>()
+    };
+    let x4 = x4.extract::<RistrettoPoint>();
+
+    let x5_bytes = cut_vector<u8>(&mut proof_bytes, 32);
+    let x5 = ristretto255::new_point_from_bytes(x5_bytes);
+    if (!x5.is_some::<RistrettoPoint>()) {
+        return std::option::none<TransferSubproof>()
+    };
+    let x5 = x5.extract::<RistrettoPoint>();
+
+    let x6_bytes = cut_vector<u8>(&mut proof_bytes, 32);
+    let x6 = ristretto255::new_point_from_bytes(x6_bytes);
+    if (!x6.is_some::<RistrettoPoint>()) {
+        return std::option::none<TransferSubproof>()
+    };
+    let x6 = x6.extract::<RistrettoPoint>();
+
+    let x7_bytes = cut_vector<u8>(&mut proof_bytes, 32);
+    let x7 = ristretto255::new_point_from_bytes(x7_bytes);
+    if (!x7.is_some::<RistrettoPoint>()) {
+        return std::option::none<TransferSubproof>()
+    };
+    let x7 = x7.extract::<RistrettoPoint>();
+
+    let alpha1_bytes = cut_vector<u8>(&mut proof_bytes, 32);
+    let alpha1 = ristretto255::new_scalar_from_bytes(alpha1_bytes);
+    if (!alpha1.is_some()) {
+        return std::option::none<TransferSubproof>()
+    };
+    let alpha1 = alpha1.extract();
+
+    let alpha2_bytes = cut_vector<u8>(&mut proof_bytes, 32);
+    let alpha2 = ristretto255::new_scalar_from_bytes(alpha2_bytes);
+    if (!alpha2.is_some()) {
+        return std::option::none<TransferSubproof>()
+    };
+    let alpha2 = alpha2.extract();
+
+    let alpha3_bytes = cut_vector<u8>(&mut proof_bytes, 32);
+    let alpha3 = ristretto255::new_scalar_from_bytes(alpha3_bytes);
+    if (!alpha3.is_some()) {
+        return std::option::none<TransferSubproof>()
+    };
+    let alpha3 = alpha3.extract();
+
+    let alpha4_bytes = cut_vector<u8>(&mut proof_bytes, 32);
+    let alpha4 = ristretto255::new_scalar_from_bytes(alpha4_bytes);
+    if (!alpha4.is_some()) {
+        return std::option::none<TransferSubproof>()
+    };
+    let alpha4 = alpha4.extract();
+
+    let alpha5_bytes = cut_vector<u8>(&mut proof_bytes, 32);
+    let alpha5 = ristretto255::new_scalar_from_bytes(alpha5_bytes);
+    if (!alpha5.is_some()) {
+        return std::option::none<TransferSubproof>()
+    };
+    let alpha5 = alpha5.extract();
+
+    std::option::some(TransferSubproof {
+        x1, x2, x3, x4, x5, x6, x7, alpha1, alpha2, alpha3, alpha4, alpha5
+    })
+}
+
+ + + +
+ + + +## Function `fiat_shamir_withdrawal_subproof_challenge` + +Computes a Fiat-Shamir challenge rho = H(G, H, Y, C_1, C_2, c, x_1, x_2, x_3) for the WithdrawalSubproof +$\Sigma$-protocol. + + +
fun fiat_shamir_withdrawal_subproof_challenge(sender_pk: &ristretto255_elgamal::CompressedPubkey, sender_curr_balance_ct: &ristretto255_elgamal::Ciphertext, sender_new_balance_comm: &ristretto255_pedersen::Commitment, amount: &ristretto255::Scalar, x1: &ristretto255::RistrettoPoint, x2: &ristretto255::RistrettoPoint, x3: &ristretto255::RistrettoPoint): ristretto255::Scalar
+
+ + + +
+Implementation + + +
fun fiat_shamir_withdrawal_subproof_challenge(
+    sender_pk: &elgamal::CompressedPubkey,
+    sender_curr_balance_ct: &elgamal::Ciphertext,
+    sender_new_balance_comm: &pedersen::Commitment,
+    amount: &Scalar,
+    x1: &RistrettoPoint,
+    x2: &RistrettoPoint,
+    x3: &RistrettoPoint): Scalar
+{
+    let (c1, c2) = elgamal::ciphertext_as_points(sender_curr_balance_ct);
+    let c = pedersen::commitment_as_point(sender_new_balance_comm);
+    let y = elgamal::pubkey_to_compressed_point(sender_pk);
+
+    let bytes = vector::empty<u8>();
+
+    bytes.append::<u8>(FIAT_SHAMIR_SIGMA_DST);
+    bytes.append::<u8>(ristretto255::point_to_bytes(&ristretto255::basepoint_compressed()));
+    bytes.append::<u8>(ristretto255::point_to_bytes(
+        &ristretto255::point_compress(&pedersen::randomness_base_for_bulletproof())));
+    bytes.append::<u8>(ristretto255::point_to_bytes(&y));
+    bytes.append::<u8>(ristretto255::point_to_bytes(&ristretto255::point_compress(c1)));
+    bytes.append::<u8>(ristretto255::point_to_bytes(&ristretto255::point_compress(c2)));
+    bytes.append::<u8>(ristretto255::point_to_bytes(&ristretto255::point_compress(c)));
+    bytes.append::<u8>(ristretto255::scalar_to_bytes(amount));
+    bytes.append::<u8>(ristretto255::point_to_bytes(&ristretto255::point_compress(x1)));
+    bytes.append::<u8>(ristretto255::point_to_bytes(&ristretto255::point_compress(x2)));
+    bytes.append::<u8>(ristretto255::point_to_bytes(&ristretto255::point_compress(x3)));
+
+    ristretto255::new_scalar_from_sha2_512(bytes)
+}
+
+ + + +
+ + + +## Function `fiat_shamir_transfer_subproof_challenge` + +Computes a Fiat-Shamir challenge rho = H(G, H, Y, Y', C, D, c, c_1, c_2, \bar{c}, {X_i}_{i=1}^7) for the +TransferSubproof $\Sigma$-protocol. + + +
fun fiat_shamir_transfer_subproof_challenge(sender_pk: &ristretto255_elgamal::CompressedPubkey, recipient_pk: &ristretto255_elgamal::CompressedPubkey, withdraw_ct: &ristretto255_elgamal::Ciphertext, deposit_ct: &ristretto255_elgamal::Ciphertext, comm_amount: &ristretto255_pedersen::Commitment, sender_curr_balance_ct: &ristretto255_elgamal::Ciphertext, sender_new_balance_comm: &ristretto255_pedersen::Commitment, x1: &ristretto255::RistrettoPoint, x2: &ristretto255::RistrettoPoint, x3: &ristretto255::RistrettoPoint, x4: &ristretto255::RistrettoPoint, x5: &ristretto255::RistrettoPoint, x6: &ristretto255::RistrettoPoint, x7: &ristretto255::RistrettoPoint): ristretto255::Scalar
+
+ + + +
+Implementation + + +
fun fiat_shamir_transfer_subproof_challenge(
+    sender_pk: &elgamal::CompressedPubkey,
+    recipient_pk: &elgamal::CompressedPubkey,
+    withdraw_ct: &elgamal::Ciphertext,
+    deposit_ct: &elgamal::Ciphertext,
+    comm_amount: &pedersen::Commitment,
+    sender_curr_balance_ct: &elgamal::Ciphertext,
+    sender_new_balance_comm: &pedersen::Commitment,
+    x1: &RistrettoPoint,
+    x2: &RistrettoPoint,
+    x3: &RistrettoPoint,
+    x4: &RistrettoPoint,
+    x5: &RistrettoPoint,
+    x6: &RistrettoPoint,
+    x7: &RistrettoPoint): Scalar
+{
+    let y = elgamal::pubkey_to_compressed_point(sender_pk);
+    let y_prime = elgamal::pubkey_to_compressed_point(recipient_pk);
+    let (big_c, big_d) = elgamal::ciphertext_as_points(withdraw_ct);
+    let (big_c_prime, _) = elgamal::ciphertext_as_points(deposit_ct);
+    let c = pedersen::commitment_as_point(comm_amount);
+    let (c1, c2) = elgamal::ciphertext_as_points(sender_curr_balance_ct);
+    let bar_c = pedersen::commitment_as_point(sender_new_balance_comm);
+
+    let bytes = vector::empty<u8>();
+
+    bytes.append::<u8>(FIAT_SHAMIR_SIGMA_DST);
+    bytes.append::<u8>(ristretto255::point_to_bytes(&ristretto255::basepoint_compressed()));
+    bytes.append::<u8>(ristretto255::point_to_bytes(
+        &ristretto255::point_compress(&pedersen::randomness_base_for_bulletproof())));
+    bytes.append::<u8>(ristretto255::point_to_bytes(&y));
+    bytes.append::<u8>(ristretto255::point_to_bytes(&y_prime));
+    bytes.append::<u8>(ristretto255::point_to_bytes(&ristretto255::point_compress(big_c)));
+    bytes.append::<u8>(ristretto255::point_to_bytes(&ristretto255::point_compress(big_c_prime)));
+    bytes.append::<u8>(ristretto255::point_to_bytes(&ristretto255::point_compress(big_d)));
+    bytes.append::<u8>(ristretto255::point_to_bytes(&ristretto255::point_compress(c)));
+    bytes.append::<u8>(ristretto255::point_to_bytes(&ristretto255::point_compress(c1)));
+    bytes.append::<u8>(ristretto255::point_to_bytes(&ristretto255::point_compress(c2)));
+    bytes.append::<u8>(ristretto255::point_to_bytes(&ristretto255::point_compress(bar_c)));
+    bytes.append::<u8>(ristretto255::point_to_bytes(&ristretto255::point_compress(x1)));
+    bytes.append::<u8>(ristretto255::point_to_bytes(&ristretto255::point_compress(x2)));
+    bytes.append::<u8>(ristretto255::point_to_bytes(&ristretto255::point_compress(x3)));
+    bytes.append::<u8>(ristretto255::point_to_bytes(&ristretto255::point_compress(x4)));
+    bytes.append::<u8>(ristretto255::point_to_bytes(&ristretto255::point_compress(x5)));
+    bytes.append::<u8>(ristretto255::point_to_bytes(&ristretto255::point_compress(x6)));
+    bytes.append::<u8>(ristretto255::point_to_bytes(&ristretto255::point_compress(x7)));
+
+    ristretto255::new_scalar_from_sha2_512(bytes)
+}
+
+ + + +
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/aptos-experimental/doc/test_derivable_account_abstraction_ed25519_hex.md b/aptos-move/framework/aptos-experimental/doc/test_derivable_account_abstraction_ed25519_hex.md new file mode 100644 index 0000000000000..ab5201f8ff979 --- /dev/null +++ b/aptos-move/framework/aptos-experimental/doc/test_derivable_account_abstraction_ed25519_hex.md @@ -0,0 +1,80 @@ + + + +# Module `0x7::test_derivable_account_abstraction_ed25519_hex` + +Domain account abstraction using ed25519 hex for signing. + +Authentication takes digest, converts to hex (prefixed with 0x, with lowercase letters), +and then expects that to be signed. +authenticator is expected to be signature: vector +account_identity is raw public_key. + + +- [Constants](#@Constants_0) +- [Function `authenticate`](#0x7_test_derivable_account_abstraction_ed25519_hex_authenticate) + + +
use 0x1::auth_data;
+use 0x1::ed25519;
+use 0x1::error;
+use 0x1::string;
+use 0x1::string_utils;
+
+ + + + + +## Constants + + + + + + +
const EINVALID_SIGNATURE: u64 = 1;
+
+ + + + + +## Function `authenticate` + +Authorization function for domain account abstraction. + + +
public fun authenticate(account: signer, aa_auth_data: auth_data::AbstractionAuthData): signer
+
+ + + +
+Implementation + + +
public fun authenticate(account: signer, aa_auth_data: AbstractionAuthData): signer {
+    let hex_digest = string_utils::to_string(aa_auth_data.digest());
+
+    let public_key = new_unvalidated_public_key_from_bytes(*aa_auth_data.derivable_abstract_public_key());
+    let signature = new_signature_from_bytes(*aa_auth_data.derivable_abstract_signature());
+    assert!(
+        ed25519::signature_verify_strict(
+            &signature,
+            &public_key,
+            *hex_digest.bytes(),
+        ),
+        error::permission_denied(EINVALID_SIGNATURE)
+    );
+
+    account
+}
+
+ + + +
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/aptos-experimental/doc/test_function_values.md b/aptos-move/framework/aptos-experimental/doc/test_function_values.md new file mode 100644 index 0000000000000..9e047f648257c --- /dev/null +++ b/aptos-move/framework/aptos-experimental/doc/test_function_values.md @@ -0,0 +1,68 @@ + + + +# Module `0x7::test_function_values` + + + +- [Struct `Funcs`](#0x7_test_function_values_Funcs) +- [Function `transfer_and_create_account`](#0x7_test_function_values_transfer_and_create_account) + + +
+ + + + + +## Struct `Funcs` + + + +
struct Funcs
+
+ + + +
+Fields + + +
+
+f: |u64|u64 has copy + drop +
+
+ +
+
+ + +
+ + + +## Function `transfer_and_create_account` + + + +
fun transfer_and_create_account(some_f: |u64|u64): u64
+
+ + + +
+Implementation + + +
fun transfer_and_create_account(some_f: |u64|u64): u64 {
+    some_f(3)
+}
+
+ + + +
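+Since transfer_and_create_account simply applies the passed function value to the constant 3, a minimal usage sketch is a hypothetical in-module test (the function is not public, so it can only be called from within this module):
+
+```
+#[test]
+fun applies_function_value() {
+    // The lambda |x| x * 2 is applied to the constant 3 inside the callee, giving 6.
+    assert!(transfer_and_create_account(|x| x * 2) == 6, 0);
+}
+```
+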
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/aptos-experimental/doc/veiled_coin.md b/aptos-move/framework/aptos-experimental/doc/veiled_coin.md new file mode 100644 index 0000000000000..7eb9ed4ff8c59 --- /dev/null +++ b/aptos-move/framework/aptos-experimental/doc/veiled_coin.md @@ -0,0 +1,1510 @@ + + + +# Module `0x7::veiled_coin` + +**WARNING:** This is an **experimental, proof-of-concept** module! It is *NOT* production-ready and it will likely +lead to loss of funds if used (or misused). + +This module provides a veiled coin type, denoted VeiledCoin<T> that hides the value/denomination of a coin. +Importantly, although veiled transactions hide the amount of coins sent they still leak the sender and recipient. + + + + +### How to use veiled coins + + +This module allows users to "register" a veiled account for any pre-existing aptos_framework::Coin type T via +the register entry function. For this, an encryption public key will need to be given as input, under which +the registered user's veiled balance will be encrypted. + +Once Alice registers a veiled account for T, she can call veil with any public amount a of T coins +and add them to her veiled balance. Note that these coins will not be properly veiled yet, since they were withdrawn +from a public balance, which leaks their value. + +(Alternatively, another user can initialize Alice's veiled balance by calling veil_to.) + +Suppose Bob also registers and veils b of his own coins of type T. + +Now Alice can use fully_veiled_transfer to send to Bob a secret amount v of coins from her veiled balance. +This will, for the first time, properly hide both Alice's and Bob's veiled balance. +The only information that an attacker (e.g., an Aptos validator) learns, is that Alice transferred an unknown amount +v to Bob (including $v=0$), and as a result Alice's veiled balance is in a range [a-v, a] and Bob's veiled balance +is in [b, b+v]. + +As more veiled transfers occur between more veiled accounts, the uncertainity on the balance of each account becomes +larger and larger. + +Lastly, users can easily withdraw veiled coins back into their public balance via unveil. Or, they can withdraw +publicly into someone else's public balance via unveil_to. + + + + +### Terminology + + +1. *Veiled coin*: a coin whose value is secret; i.e., it is encrypted under the owner's public key. + +2. *Veiled amount*: any amount that is secret because it was encrypted under some public key. +3. *Committed amount*: any amount that is secret because it was committed to (rather than encrypted). + +4. *Veiled transaction*: a transaction that hides its amount transferred; i.e., a transaction whose amount is veiled. + +5. *Veiled balance*: unlike a normal balance, a veiled balance is secret; i.e., it is encrypted under the account's +public key. + +6. *ZKRP*: zero-knowledge range proofs; one of the key cryptographic ingredient in veiled coins which ensures users +can withdraw secretely from their veiled balance without over-withdrawing. + + + + +### Limitations + + +**WARNING:** This module is **experimental**! It is *NOT* production-ready. Specifically: + +1. Deploying this module will likely lead to lost funds. +2. This module has not been cryptographically-audited. +3. The current implementation is vulnerable to _front-running attacks_ as described in the Zether paper [BAZB20]. +4. There is no integration with wallet software which, for veiled accounts, must maintain an additional ElGamal +encryption keypair. +5. 
There is no support for rotating the ElGamal encryption public key of a veiled account. + + +coin_amounts_as_truncated_u32's_3"> + +### Veiled coin amounts as truncated u32's + + +Veiled coin amounts must be specified as u32's rather than u64's as would be typical for normal coins in the +Aptos framework. This is because coin amounts must be encrypted with an *efficient*, additively-homomorphic encryption +scheme. Currently, our best candidate is ElGamal encryption in the exponent, which can only decrypt values around +32 bits or slightly larger. + +Specifically, veiled coin amounts are restricted to be 32 bits and can be cast to a normal 64-bit coin value by +setting the leftmost and rightmost 16 bits to zero and the "middle" 32 bits to be the veiled coin bits. + +This gives veiled amounts ~10 bits for specifying ~3 decimals and ~22 bits for specifying whole amounts, which +limits veiled balances and veiled transfers to around 4 million coins. (See coin.move for how a normal 64-bit coin +value gets interpreted as a decimal number.) + +In order to convert a u32 veiled coin amount to a normal u64 coin amount, we have to shift it left by 16 bits. + +``` +u64 normal coin amount format: +[ left || middle || right ] +[ 63 - 32 || 31 - 16 || 15 - 0] + +u32 veiled coin amount format; we take the middle 32 bits from the u64 format above and store them in a u32: +[ middle ] +[ 31 - 0 ] +``` + +Recall that: A coin has a *decimal precision* $d$ (e.g., for AptosCoin, $d = 8$; see initialize in +aptos_coin.move). This precision $d$ is used when displaying a u64 amount, by dividing the amount by $10^d$. +For example, if the precision $d = 2$, then a u64 amount of 505 coins displays as 5.05 coins. + +For veiled coins, we can easily display a u32 Coin<T> amount $v$ by: +1. Casting $v$ as a u64 and shifting this left by 16 bits, obtaining a 64-bit $v'$ +2. Displaying $v'$ normally, by dividing it by $d$, which is the precision in CoinInfo<T>. + + + + +### Implementation details + + +This module leverages a so-called "resource account," which helps us mint a VeiledCoin<T> from a +normal coin::Coin<T> by transferring this latter coin into a coin::CoinStore<T> stored in the +resource account. + +Later on, when someone wants to convert their VeiledCoin<T> into a normal coin::Coin<T>, +the resource account can be used to transfer out the normal from its coin store. Transferring out a coin like this +requires a signer for the resource account, which the veiled_coin module can obtain via a SignerCapability. 
+ + + + +### References + + +[BAZB20] Zether: Towards Privacy in a Smart Contract World; by Bunz, Benedikt and Agrawal, Shashank and Zamani, +Mahdi and Boneh, Dan; in Financial Cryptography and Data Security; 2020 + + + - [How to use veiled coins](#@How_to_use_veiled_coins_0) + - [Terminology](#@Terminology_1) + - [Limitations](#@Limitations_2) + - [Veiled coin amounts as truncated u32's](#@Veiled_coin_amounts_as_truncated_u32's_3) + - [Implementation details](#@Implementation_details_4) + - [References](#@References_5) +- [Resource `VeiledCoinStore`](#0x7_veiled_coin_VeiledCoinStore) +- [Struct `Deposit`](#0x7_veiled_coin_Deposit) +- [Struct `Withdraw`](#0x7_veiled_coin_Withdraw) +- [Resource `VeiledCoinMinter`](#0x7_veiled_coin_VeiledCoinMinter) +- [Struct `VeiledCoin`](#0x7_veiled_coin_VeiledCoin) +- [Struct `TransferProof`](#0x7_veiled_coin_TransferProof) +- [Struct `WithdrawalProof`](#0x7_veiled_coin_WithdrawalProof) +- [Constants](#@Constants_6) +- [Function `init_module`](#0x7_veiled_coin_init_module) +- [Function `register`](#0x7_veiled_coin_register) +- [Function `veil_to`](#0x7_veiled_coin_veil_to) +- [Function `veil`](#0x7_veiled_coin_veil) +- [Function `unveil_to`](#0x7_veiled_coin_unveil_to) +- [Function `unveil`](#0x7_veiled_coin_unveil) +- [Function `fully_veiled_transfer`](#0x7_veiled_coin_fully_veiled_transfer) +- [Function `clamp_u64_to_u32_amount`](#0x7_veiled_coin_clamp_u64_to_u32_amount) +- [Function `cast_u32_to_u64_amount`](#0x7_veiled_coin_cast_u32_to_u64_amount) +- [Function `has_veiled_coin_store`](#0x7_veiled_coin_has_veiled_coin_store) +- [Function `veiled_amount`](#0x7_veiled_coin_veiled_amount) +- [Function `veiled_balance`](#0x7_veiled_coin_veiled_balance) +- [Function `encryption_public_key`](#0x7_veiled_coin_encryption_public_key) +- [Function `total_veiled_coins`](#0x7_veiled_coin_total_veiled_coins) +- [Function `get_veiled_coin_bulletproofs_dst`](#0x7_veiled_coin_get_veiled_coin_bulletproofs_dst) +- [Function `get_max_bits_in_veiled_coin_value`](#0x7_veiled_coin_get_max_bits_in_veiled_coin_value) +- [Function `register_internal`](#0x7_veiled_coin_register_internal) +- [Function `veiled_deposit`](#0x7_veiled_coin_veiled_deposit) +- [Function `unveil_to_internal`](#0x7_veiled_coin_unveil_to_internal) +- [Function `fully_veiled_transfer_internal`](#0x7_veiled_coin_fully_veiled_transfer_internal) +- [Function `verify_range_proofs`](#0x7_veiled_coin_verify_range_proofs) +- [Function `get_resource_account_signer`](#0x7_veiled_coin_get_resource_account_signer) +- [Function `veiled_mint_from_coin`](#0x7_veiled_coin_veiled_mint_from_coin) + + +
use 0x1::account;
+use 0x1::coin;
+use 0x1::error;
+use 0x1::event;
+use 0x1::option;
+use 0x1::ristretto255;
+use 0x1::ristretto255_bulletproofs;
+use 0x1::ristretto255_elgamal;
+use 0x1::ristretto255_pedersen;
+use 0x1::signer;
+use 0x7::helpers;
+use 0x7::sigma_protos;
+
+ + + + + +## Resource `VeiledCoinStore` + +A holder of a specific coin type and its associated event handles. +These are kept in a single resource to ensure locality of data. + + +
struct VeiledCoinStore<CoinType> has key
+
+ + + +
+Fields + + +
+
+veiled_balance: ristretto255_elgamal::CompressedCiphertext +
+
+ A ElGamal ciphertext of a value $v \in [0, 2^{32})$, an invariant that is enforced throughout the code. +
+
+pk: ristretto255_elgamal::CompressedPubkey +
+
+ +
+
+ + +
+ + + +## Struct `Deposit` + +Event emitted when some amount of veiled coins were deposited into an account. + + +
#[event]
+struct Deposit has drop, store
+
+ + + +
+Fields + + +
+
+user: address +
+
+ +
+
+ + +
+ + + +## Struct `Withdraw` + +Event emitted when some amount of veiled coins were withdrawn from an account. + + +
#[event]
+struct Withdraw has drop, store
+
+ + + +
+Fields + + +
+
+user: address +
+
+ +
+
+ + +
+ + + +## Resource `VeiledCoinMinter` + +Holds an account::SignerCapability for the resource account created when initializing this module. This +resource account houses a coin::CoinStore<T> for every type of coin T that is veiled. + + +
struct VeiledCoinMinter has store, key
+
+ + + +
+Fields + + +
+
+signer_cap: account::SignerCapability +
+
+ +
+
+ + +
+ + + +## Struct `VeiledCoin` + +Main structure representing a coin in an account's custody. + + +
struct VeiledCoin<CoinType>
+
+ + + +
+Fields + + +
+
+veiled_amount: ristretto255_elgamal::Ciphertext +
+
+ ElGamal ciphertext which encrypts the number of coins $v \in [0, 2^{32})$. This $[0, 2^{32})$ range invariant + is enforced throughout the code via Bulletproof-based ZK range proofs. +
+
+ + +
+ + + +## Struct `TransferProof` + +A cryptographic proof that ensures correctness of a veiled-to-veiled coin transfer. + + +
struct TransferProof has drop
+
+ + + +
+Fields + + +
+
+sigma_proof: sigma_protos::TransferSubproof +
+
+ +
+
+zkrp_new_balance: ristretto255_bulletproofs::RangeProof +
+
+ +
+
+zkrp_amount: ristretto255_bulletproofs::RangeProof +
+
+ +
+
+ + +
+ + + +## Struct `WithdrawalProof` + +A cryptographic proof that ensures correctness of a veiled-to-*unveiled* coin transfer. + + +
struct WithdrawalProof has drop
+
+ + + +
+Fields + + +
+
+sigma_proof: sigma_protos::WithdrawalSubproof +
+
+ +
+
+zkrp_new_balance: ristretto255_bulletproofs::RangeProof +
+
+ +
+
+ + +
+ + + +## Constants + + + + +Not enough coins to complete transaction. + + +
const EINSUFFICIENT_BALANCE: u64 = 5;
+
+ + + + + +Non-specific internal error (see source code) + + +
const EINTERNAL_ERROR: u64 = 9;
+
+ + + + + +A range proof failed to verify. + + +
const ERANGE_PROOF_VERIFICATION_FAILED: u64 = 2;
+
+ + + + + +The range proof system does not support proofs for any number \in [0, 2^{32}) + + +
const ERANGE_PROOF_SYSTEM_HAS_INSUFFICIENT_RANGE: u64 = 1;
+
+ + + + + +Byte vector given for deserialization was the wrong length. + + +
const EBYTES_WRONG_LENGTH: u64 = 7;
+
+ + + + + +Failed deserializing bytes into either ElGamal ciphertext or $\Sigma$-protocol proof. + + +
const EDESERIALIZATION_FAILED: u64 = 6;
+
+ + + + + +The NUM_LEAST_SIGNIFICANT_BITS_REMOVED and NUM_MOST_SIGNIFICANT_BITS_REMOVED constants need to sum to 32 (bits). + + +
const EU64_COIN_AMOUNT_CLAMPING_IS_INCORRECT: u64 = 8;
+
+ + + + + +Account already has VeiledCoinStore<CoinType> registered. + + +
const EVEILED_COIN_STORE_ALREADY_PUBLISHED: u64 = 3;
+
+ + + + + +Account hasn't registered VeiledCoinStore<CoinType>. + + +
const EVEILED_COIN_STORE_NOT_PUBLISHED: u64 = 4;
+
+ + + + + +The maximum number of bits used to represent a coin's value. + + +
const MAX_BITS_IN_VEILED_COIN_VALUE: u64 = 32;
+
+ + + + + +When converting a u64 normal (public) amount to a u32 veiled amount, we keep the middle 32 bits and +remove the NUM_LEAST_SIGNIFICANT_BITS_REMOVED least significant bits and the NUM_MOST_SIGNIFICANT_BITS_REMOVED +most significant bits (see comments in the beginning of this file). + +When converting a u32 veiled amount to a u64 normal (public) amount, we simply cast it to u64 and shift it +left by NUM_LEAST_SIGNIFICANT_BITS_REMOVED. + + +
const NUM_LEAST_SIGNIFICANT_BITS_REMOVED: u8 = 16;
+
+ + + + + +See NUM_LEAST_SIGNIFICANT_BITS_REMOVED comments. + + +
const NUM_MOST_SIGNIFICANT_BITS_REMOVED: u8 = 16;
+
+ + + + + +The domain separation tag (DST) used for the Bulletproofs prover. + + +
const VEILED_COIN_BULLETPROOFS_DST: vector<u8> = [65, 112, 116, 111, 115, 86, 101, 105, 108, 101, 100, 67, 111, 105, 110, 47, 66, 117, 108, 108, 101, 116, 112, 114, 111, 111, 102, 82, 97, 110, 103, 101, 80, 114, 111, 111, 102];
+
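+As with the $\Sigma$-protocol DST in sigma_protos, these bytes are the ASCII encoding of a tag, here `AptosVeiledCoin/BulletproofRangeProof`; an equivalent (illustrative) spelling is:
+
+```
+const VEILED_COIN_BULLETPROOFS_DST: vector<u8> = b"AptosVeiledCoin/BulletproofRangeProof";
+```
+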
+ + + + + +## Function `init_module` + +Initializes a so-called "resource" account which will maintain a coin::CoinStore<T> resource for all Coin<T>'s +that have been converted into a VeiledCoin<T>. + + +
fun init_module(deployer: &signer)
+
+ + + +
+Implementation + + +
fun init_module(deployer: &signer) {
+    assert!(
+        bulletproofs::get_max_range_bits() >= MAX_BITS_IN_VEILED_COIN_VALUE,
+        error::internal(ERANGE_PROOF_SYSTEM_HAS_INSUFFICIENT_RANGE)
+    );
+
+    assert!(
+        NUM_LEAST_SIGNIFICANT_BITS_REMOVED + NUM_MOST_SIGNIFICANT_BITS_REMOVED == 32,
+        error::internal(EU64_COIN_AMOUNT_CLAMPING_IS_INCORRECT)
+    );
+
+    // Create the resource account. This will allow this module to later obtain a `signer` for this account and
+    // transfer `Coin<T>`'s into its `CoinStore<T>` before minting a `VeiledCoin<T>`.
+    let (_resource, signer_cap) = account::create_resource_account(deployer, vector::empty());
+
+    move_to(deployer,
+        VeiledCoinMinter {
+            signer_cap
+        }
+    )
+}
+
+ + + +
+ + + +## Function `register` + +Initializes a veiled account for the specified user such that their balance is encrypted under public key pk. +Importantly, the user's wallet must retain their corresponding secret key. + + +
public entry fun register<CoinType>(user: &signer, pk: vector<u8>)
+
+ + + +
+Implementation + + +
public entry fun register<CoinType>(user: &signer, pk: vector<u8>) {
+    let pk = elgamal::new_pubkey_from_bytes(pk);
+    register_internal<CoinType>(user, pk.extract());
+}
+
+ + + +
+ + + +## Function `veil_to` + +Sends a *public* amount of normal coins from sender to the recipient's veiled balance. + +**WARNING:** This function *leaks* the transferred amount, since it is given as a public input. + + +
public entry fun veil_to<CoinType>(sender: &signer, recipient: address, amount: u32)
+
+ + + +
+Implementation + + +
public entry fun veil_to<CoinType>(
+    sender: &signer, recipient: address, amount: u32) acquires VeiledCoinMinter, VeiledCoinStore
+{
+    let c = coin::withdraw<CoinType>(sender, cast_u32_to_u64_amount(amount));
+
+    let vc = veiled_mint_from_coin(c);
+
+    veiled_deposit<CoinType>(recipient, vc)
+}
+
+ + + +
+ + + +## Function `veil` + +Like veil_to, except owner is both the sender and the recipient. + +This function can be used by the owner to initialize his veiled balance to a *public* value. + +**WARNING:** The initialized balance is *leaked*, since its initialized amount is public here. + + +
public entry fun veil<CoinType>(owner: &signer, amount: u32)
+
+ + + +
+Implementation + + +
public entry fun veil<CoinType>(owner: &signer, amount: u32) acquires VeiledCoinMinter, VeiledCoinStore {
+    veil_to<CoinType>(owner, signer::address_of(owner), amount)
+}
+
+ + + +
+ + + +## Function `unveil_to` + +Takes a *public* amount of VeiledCoin<CoinType> coins from sender, unwraps them to a coin::Coin<CoinType>, +and sends them to recipient. Maintains secrecy of sender's new balance. + +Requires a ZK range proof on the new balance of the sender, to ensure the sender has enough money to send. +No ZK range proof is necessary for the amount, which is given as a public u32 value. + +**WARNING:** This *leaks* the transferred amount, since it is a public u32 argument. + + +
public entry fun unveil_to<CoinType>(sender: &signer, recipient: address, amount: u32, comm_new_balance: vector<u8>, zkrp_new_balance: vector<u8>, withdraw_subproof: vector<u8>)
+
+ + + +
+Implementation + + +
public entry fun unveil_to<CoinType>(
+    sender: &signer,
+    recipient: address,
+    amount: u32,
+    comm_new_balance: vector<u8>,
+    zkrp_new_balance: vector<u8>,
+    withdraw_subproof: vector<u8>) acquires VeiledCoinStore, VeiledCoinMinter
+{
+    // Deserialize all the proofs into their proper Move structs
+    let comm_new_balance = pedersen::new_commitment_from_bytes(comm_new_balance);
+    assert!(comm_new_balance.is_some(), error::invalid_argument(EDESERIALIZATION_FAILED));
+
+    let sigma_proof = sigma_protos::deserialize_withdrawal_subproof(withdraw_subproof);
+    assert!(std::option::is_some(&sigma_proof), error::invalid_argument(EDESERIALIZATION_FAILED));
+
+    let comm_new_balance = comm_new_balance.extract();
+    let zkrp_new_balance = bulletproofs::range_proof_from_bytes(zkrp_new_balance);
+
+    let withdrawal_proof = WithdrawalProof {
+        sigma_proof: std::option::extract(&mut sigma_proof),
+        zkrp_new_balance,
+    };
+
+    // Do the actual work
+    unveil_to_internal<CoinType>(sender, recipient, amount, comm_new_balance, withdrawal_proof);
+}
+
+ + + +
+ + + +## Function `unveil` + +Like unveil_to, except the sender is also the recipient. + + +
public entry fun unveil<CoinType>(sender: &signer, amount: u32, comm_new_balance: vector<u8>, zkrp_new_balance: vector<u8>, withdraw_subproof: vector<u8>)
+
+ + + +
+Implementation + + +
public entry fun unveil<CoinType>(
+    sender: &signer,
+    amount: u32,
+    comm_new_balance: vector<u8>,
+    zkrp_new_balance: vector<u8>,
+    withdraw_subproof: vector<u8>) acquires VeiledCoinStore, VeiledCoinMinter
+{
+    unveil_to<CoinType>(
+        sender,
+        signer::address_of(sender),
+        amount,
+        comm_new_balance,
+        zkrp_new_balance,
+        withdraw_subproof
+    )
+}
+
+ + + +
+ + + +## Function `fully_veiled_transfer` + +Sends a *veiled* amount from sender to recipient. After this call, the veiled balances of both the sender +and the recipient remain (or become) secret. + +The sent amount always remains secret; It is encrypted both under the sender's PK (in withdraw_ct) & under the +recipient's PK (in deposit_ct) using the *same* ElGamal randomness, so as to allow for efficiently updating both +the sender's & recipient's veiled balances. It is also committed under comm_amount, so as to allow for a ZK +range proof. + +Requires a TransferProof; i.e.: +1. A range proof zkrp_new_balance on the new balance of the sender, to ensure the sender has enough money to +send. +2. A range proof zkrp_amount on the transferred amount in comm_amount, to ensure the sender won't create +coins out of thin air. +3. A $\Sigma$-protocol proof transfer_subproof which proves that 'withdraw_ct' encrypts the same veiled amount +as in 'deposit_ct' (with the same randomness) and as in comm_amount. + + +
public entry fun fully_veiled_transfer<CoinType>(sender: &signer, recipient: address, withdraw_ct: vector<u8>, deposit_ct: vector<u8>, comm_new_balance: vector<u8>, comm_amount: vector<u8>, zkrp_new_balance: vector<u8>, zkrp_amount: vector<u8>, transfer_subproof: vector<u8>)
+
+ + + +
+Implementation + + +
public entry fun fully_veiled_transfer<CoinType>(
+    sender: &signer,
+    recipient: address,
+    withdraw_ct: vector<u8>,
+    deposit_ct: vector<u8>,
+    comm_new_balance: vector<u8>,
+    comm_amount: vector<u8>,
+    zkrp_new_balance: vector<u8>,
+    zkrp_amount: vector<u8>,
+    transfer_subproof: vector<u8>) acquires VeiledCoinStore
+{
+    // Deserialize everything into their proper Move structs
+    let veiled_withdraw_amount = elgamal::new_ciphertext_from_bytes(withdraw_ct);
+    assert!(veiled_withdraw_amount.is_some(), error::invalid_argument(EDESERIALIZATION_FAILED));
+
+    let veiled_deposit_amount = elgamal::new_ciphertext_from_bytes(deposit_ct);
+    assert!(veiled_deposit_amount.is_some(), error::invalid_argument(EDESERIALIZATION_FAILED));
+
+    let comm_new_balance = pedersen::new_commitment_from_bytes(comm_new_balance);
+    assert!(comm_new_balance.is_some(), error::invalid_argument(EDESERIALIZATION_FAILED));
+
+    let comm_amount = pedersen::new_commitment_from_bytes(comm_amount);
+    assert!(comm_amount.is_some(), error::invalid_argument(EDESERIALIZATION_FAILED));
+
+    let transfer_subproof = sigma_protos::deserialize_transfer_subproof(transfer_subproof);
+    assert!(std::option::is_some(&transfer_subproof), error::invalid_argument(EDESERIALIZATION_FAILED));
+
+    let transfer_proof = TransferProof {
+        zkrp_new_balance: bulletproofs::range_proof_from_bytes(zkrp_new_balance),
+        zkrp_amount: bulletproofs::range_proof_from_bytes(zkrp_amount),
+        sigma_proof: std::option::extract(&mut transfer_subproof)
+    };
+
+    // Do the actual work
+    fully_veiled_transfer_internal<CoinType>(
+        sender,
+        recipient,
+        veiled_withdraw_amount.extract(),
+        veiled_deposit_amount.extract(),
+        comm_new_balance.extract(),
+        comm_amount.extract(),
+        &transfer_proof,
+    )
+}
+
+ + + +
+ + + +## Function `clamp_u64_to_u32_amount` + +Clamps a u64 normal public amount to a u32 to-be-veiled amount. + +WARNING: Precision is lost here (see "Veiled coin amounts as truncated u32's" in the top-level comments) + + +
public fun clamp_u64_to_u32_amount(amount: u64): u32
+
+ + + +
+Implementation + + +
public fun clamp_u64_to_u32_amount(amount: u64): u32 {
+    // Removes the `NUM_MOST_SIGNIFICANT_BITS_REMOVED` most significant bits.
+    amount = (amount << NUM_MOST_SIGNIFICANT_BITS_REMOVED) >> NUM_MOST_SIGNIFICANT_BITS_REMOVED;
+
+    // Removes the other `32 - NUM_MOST_SIGNIFICANT_BITS_REMOVED` least significant bits.
+    amount = amount >> NUM_LEAST_SIGNIFICANT_BITS_REMOVED;
+
+    // We are now left with a 32-bit value
+    (amount as u32)
+}
+
+ + + +
+ + + +## Function `cast_u32_to_u64_amount` + +Casts a u32 to-be-veiled amount to a u64 normal public amount. No precision is lost here. + + +
public fun cast_u32_to_u64_amount(amount: u32): u64
+
+ + + +
+Implementation + + +
public fun cast_u32_to_u64_amount(amount: u32): u64 {
+    (amount as u64) << NUM_MOST_SIGNIFICANT_BITS_REMOVED
+}
+
+ + + +
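+As a worked example of the clamping and casting above, a hypothetical in-module test (the concrete number is illustrative) could be:
+
+```
+#[test]
+fun clamp_then_cast_rounds_down_to_a_multiple_of_2_pow_16() {
+    // 500_000_000 octas (5 coins at 8 decimals). Clamping drops the 16 least significant
+    // bits, so casting back yields the amount rounded down to a multiple of 2^16.
+    let original: u64 = 500_000_000;
+    let clamped = clamp_u64_to_u32_amount(original);   // 500_000_000 >> 16 = 7_629
+    let restored = cast_u32_to_u64_amount(clamped);    // 7_629 << 16 = 499_974_144
+    assert!(restored == (original >> 16) << 16, 0);
+    assert!(restored == 499_974_144, 0);
+}
+```
+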
+ + + +## Function `has_veiled_coin_store` + +Returns true if addr is registered to receive veiled coins of CoinType. + + +
public fun has_veiled_coin_store<CoinType>(addr: address): bool
+
+ + + +
+Implementation + + +
public fun has_veiled_coin_store<CoinType>(addr: address): bool {
+    exists<VeiledCoinStore<CoinType>>(addr)
+}
+
+ + + +
+ + + +## Function `veiled_amount` + +Returns the ElGamal encryption of the value of coin. + + +
public fun veiled_amount<CoinType>(coin: &veiled_coin::VeiledCoin<CoinType>): &ristretto255_elgamal::Ciphertext
+
+ + + +
+Implementation + + +
public fun veiled_amount<CoinType>(coin: &VeiledCoin<CoinType>): &elgamal::Ciphertext {
+    &coin.veiled_amount
+}
+
+ + + +
+ + + +## Function `veiled_balance` + +Returns the ElGamal encryption of the veiled balance of owner for the provided CoinType. + + +
public fun veiled_balance<CoinType>(owner: address): ristretto255_elgamal::CompressedCiphertext
+
+ + + +
+Implementation + + +
public fun veiled_balance<CoinType>(owner: address): elgamal::CompressedCiphertext acquires VeiledCoinStore {
+    assert!(
+        has_veiled_coin_store<CoinType>(owner),
+        error::not_found(EVEILED_COIN_STORE_NOT_PUBLISHED),
+    );
+
+    borrow_global<VeiledCoinStore<CoinType>>(owner).veiled_balance
+}
+
+ + + +
+ + + +## Function `encryption_public_key` + +Given an address addr, returns the ElGamal encryption public key associated with that address + + +
public fun encryption_public_key<CoinType>(addr: address): ristretto255_elgamal::CompressedPubkey
+
+ + + +
+Implementation + + +
public fun encryption_public_key<CoinType>(addr: address): elgamal::CompressedPubkey acquires VeiledCoinStore {
+    assert!(
+        has_veiled_coin_store<CoinType>(addr),
+        error::not_found(EVEILED_COIN_STORE_NOT_PUBLISHED)
+    );
+
+    borrow_global_mut<VeiledCoinStore<CoinType>>(addr).pk
+}
+
+ + + +
+ + + +## Function `total_veiled_coins` + +Returns the total supply of veiled coins + + +
public fun total_veiled_coins<CoinType>(): u64
+
+ + + +
+Implementation + + +
public fun total_veiled_coins<CoinType>(): u64 acquires VeiledCoinMinter {
+    let rsrc_acc_addr = signer::address_of(&get_resource_account_signer());
+    assert!(coin::is_account_registered<CoinType>(rsrc_acc_addr), EINTERNAL_ERROR);
+
+    coin::balance<CoinType>(rsrc_acc_addr)
+}
+
+ + + +
+ + + +## Function `get_veiled_coin_bulletproofs_dst` + +Returns the domain separation tag (DST) for constructing Bulletproof-based range proofs in this module. + + +
public fun get_veiled_coin_bulletproofs_dst(): vector<u8>
+
+ + + +
+Implementation + + +
public fun get_veiled_coin_bulletproofs_dst(): vector<u8> {
+    VEILED_COIN_BULLETPROOFS_DST
+}
+
+ + + +
+ + + +## Function `get_max_bits_in_veiled_coin_value` + +Returns the maximum # of bits used to represent a veiled coin amount. Might differ than the 64 bits used to +represent normal aptos_framework::coin::Coin values. + + +
public fun get_max_bits_in_veiled_coin_value(): u64
+
+ + + +
+Implementation + + +
public fun get_max_bits_in_veiled_coin_value(): u64 {
+    MAX_BITS_IN_VEILED_COIN_VALUE
+}
+
+ + + +
+ + + +## Function `register_internal` + +Like register, but the public key has been parsed in a type-safe struct. +TODO: Do we want to require a PoK of the SK here? + + +
public fun register_internal<CoinType>(user: &signer, pk: ristretto255_elgamal::CompressedPubkey)
+
+ + + +
+Implementation + + +
public fun register_internal<CoinType>(user: &signer, pk: elgamal::CompressedPubkey) {
+    let account_addr = signer::address_of(user);
+    assert!(
+        !has_veiled_coin_store<CoinType>(account_addr),
+        error::already_exists(EVEILED_COIN_STORE_ALREADY_PUBLISHED),
+    );
+
+    // Note: There is no way to find an ElGamal SK such that the `(0_G, 0_G)` ciphertext below decrypts to a non-zero
+    // value. We'd need to have `(r * G, v * G + r * pk) = (0_G, 0_G)`, which implies `r = 0` for any choice of PK/SK.
+    // Thus, we must have `v * G = 0_G`, which implies `v = 0`.
+
+    let coin_store = VeiledCoinStore<CoinType> {
+        veiled_balance: helpers::get_veiled_balance_zero_ciphertext(),
+        pk,
+    };
+    move_to(user, coin_store);
+}
+
+ + + +
+ + + +## Function `veiled_deposit` + +Deposits a veiled coin at address to_addr. + + +
public fun veiled_deposit<CoinType>(to_addr: address, coin: veiled_coin::VeiledCoin<CoinType>)
+
+ + + +
+Implementation + + +
public fun veiled_deposit<CoinType>(to_addr: address, coin: VeiledCoin<CoinType>) acquires VeiledCoinStore {
+    assert!(
+        has_veiled_coin_store<CoinType>(to_addr),
+        error::not_found(EVEILED_COIN_STORE_NOT_PUBLISHED),
+    );
+
+    let veiled_coin_store = borrow_global_mut<VeiledCoinStore<CoinType>>(to_addr);
+
+    // Fetch the veiled balance
+    let veiled_balance = elgamal::decompress_ciphertext(&veiled_coin_store.veiled_balance);
+
+    // Add the veiled amount to the veiled balance (leverages the homomorphism of the encryption scheme)
+    elgamal::ciphertext_add_assign(&mut veiled_balance, &coin.veiled_amount);
+
+    // Update the veiled balance
+    veiled_coin_store.veiled_balance = elgamal::compress_ciphertext(&veiled_balance);
+
+    // Make sure the veiled coin is dropped so it cannot be double spent
+    let VeiledCoin<CoinType> { veiled_amount: _ } = coin;
+
+    // Once successful, emit an event that a veiled deposit occurred.
+    event::emit(
+        Deposit { user: to_addr },
+    );
+}
+
+ + + +
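+The update above relies on the additive homomorphism of exponent ElGamal: if the stored balance ciphertext is $(C_1, C_2) = (b G + r_1 Y,\ r_1 G)$ and the deposited coin's ciphertext is $(C, D) = (v G + r_2 Y,\ r_2 G)$, then their component-wise sum is a valid encryption of the updated balance under the same key:
+
+```
+(C_1 + C, C_2 + D) = ((b + v) G + (r_1 + r_2) Y, (r_1 + r_2) G)
+```
+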
+ + + +## Function `unveil_to_internal` + +Like unveil_to, except the proofs have been deserialized into type-safe structs. + + +
public fun unveil_to_internal<CoinType>(sender: &signer, recipient: address, amount: u32, comm_new_balance: ristretto255_pedersen::Commitment, withdrawal_proof: veiled_coin::WithdrawalProof)
+
+ + + +
+Implementation + + +
public fun unveil_to_internal<CoinType>(
+    sender: &signer,
+    recipient: address,
+    amount: u32,
+    comm_new_balance: pedersen::Commitment,
+    withdrawal_proof: WithdrawalProof
+) acquires VeiledCoinStore, VeiledCoinMinter {
+    let addr = signer::address_of(sender);
+    assert!(
+        has_veiled_coin_store<CoinType>(addr),
+        error::not_found(EVEILED_COIN_STORE_NOT_PUBLISHED)
+    );
+
+    // Fetch the sender's ElGamal encryption public key
+    let sender_pk = encryption_public_key<CoinType>(addr);
+
+    // Fetch the sender's veiled balance
+    let veiled_coin_store = borrow_global_mut<VeiledCoinStore<CoinType>>(addr);
+    let veiled_balance = elgamal::decompress_ciphertext(&veiled_coin_store.veiled_balance);
+
+    // Create a (not-yet-secure) encryption of `amount`, since `amount` is a public argument here.
+    let scalar_amount = ristretto255::new_scalar_from_u32(amount);
+
+    // Verify that `comm_new_balance` is a commitment to the remaining balance after withdrawing `amount`.
+    sigma_protos::verify_withdrawal_subproof(
+        &sender_pk,
+        &veiled_balance,
+        &comm_new_balance,
+        &scalar_amount,
+        &withdrawal_proof.sigma_proof);
+
+    // Verify a ZK range proof on `comm_new_balance` (and thus on the remaining `veiled_balance`)
+    verify_range_proofs(
+        &comm_new_balance,
+        &withdrawal_proof.zkrp_new_balance,
+        &std::option::none(),
+        &std::option::none());
+
+    let veiled_amount = elgamal::new_ciphertext_no_randomness(&scalar_amount);
+
+    // Withdraw `amount` from the veiled balance (leverages the homomorphism of the encryption scheme.)
+    elgamal::ciphertext_sub_assign(&mut veiled_balance, &veiled_amount);
+
+    // Update the veiled balance to reflect the veiled withdrawal
+    veiled_coin_store.veiled_balance = elgamal::compress_ciphertext(&veiled_balance);
+
+    // Emit event to indicate a veiled withdrawal occurred
+    event::emit(
+        Withdraw { user: addr },
+    );
+
+    // Withdraw normal `Coin`'s from the resource account and deposit them in the recipient's
+    let c = coin::withdraw(&get_resource_account_signer(), cast_u32_to_u64_amount(amount));
+
+    coin::deposit<CoinType>(recipient, c);
+}
+
+ + + +
+ + + +## Function `fully_veiled_transfer_internal` + +Like fully_veiled_transfer, except the ciphertext and proofs have been deserialized into type-safe structs. + + +
public fun fully_veiled_transfer_internal<CoinType>(sender: &signer, recipient_addr: address, veiled_withdraw_amount: ristretto255_elgamal::Ciphertext, veiled_deposit_amount: ristretto255_elgamal::Ciphertext, comm_new_balance: ristretto255_pedersen::Commitment, comm_amount: ristretto255_pedersen::Commitment, transfer_proof: &veiled_coin::TransferProof)
+
+ + + +
+Implementation + + +
public fun fully_veiled_transfer_internal<CoinType>(
+    sender: &signer,
+    recipient_addr: address,
+    veiled_withdraw_amount: elgamal::Ciphertext,
+    veiled_deposit_amount: elgamal::Ciphertext,
+    comm_new_balance: pedersen::Commitment,
+    comm_amount: pedersen::Commitment,
+    transfer_proof: &TransferProof) acquires VeiledCoinStore
+{
+    let sender_addr = signer::address_of(sender);
+
+    let sender_pk = encryption_public_key<CoinType>(sender_addr);
+    let recipient_pk = encryption_public_key<CoinType>(recipient_addr);
+
+    // Note: The `encryption_public_key` call from above already asserts that `sender_addr` has a coin store.
+    let sender_veiled_coin_store = borrow_global_mut<VeiledCoinStore<CoinType>>(sender_addr);
+
+    // Fetch the veiled balance of the veiled account
+    let veiled_balance = elgamal::decompress_ciphertext(&sender_veiled_coin_store.veiled_balance);
+
+    // Checks that `veiled_withdraw_amount` and `veiled_deposit_amount` encrypt the same amount of coins, under the
+    // sender and recipient's PKs. Also checks this amount is committed inside `comm_amount`. Also, checks that the
+    // new balance encrypted in `veiled_balance` is committed in `comm_new_balance`.
+    sigma_protos::verify_transfer_subproof(
+        &sender_pk,
+        &recipient_pk,
+        &veiled_withdraw_amount,
+        &veiled_deposit_amount,
+        &comm_amount,
+        &comm_new_balance,
+        &veiled_balance,
+        &transfer_proof.sigma_proof);
+
+    // Update the account's veiled balance by homomorphically subtracting the veiled amount from the veiled balance.
+    elgamal::ciphertext_sub_assign(&mut veiled_balance, &veiled_withdraw_amount);
+
+
+    // Verifies range proofs on the transferred amount and the remaining balance
+    verify_range_proofs(
+        &comm_new_balance,
+        &transfer_proof.zkrp_new_balance,
+        &std::option::some(comm_amount),
+        &std::option::some(transfer_proof.zkrp_amount));
+
+    // Update the veiled balance to reflect the veiled withdrawal
+    sender_veiled_coin_store.veiled_balance = elgamal::compress_ciphertext(&veiled_balance);
+
+    // Once everything succeeds, emit an event to indicate a veiled withdrawal occurred
+    event::emit(
+        Withdraw { user: sender_addr },
+    );
+
+    // Create a new veiled coin for the recipient.
+    let vc = VeiledCoin<CoinType> { veiled_amount: veiled_deposit_amount };
+
+    // Deposits `veiled_deposit_amount` into the recipient's account
+    // (Note, if this aborts, the whole transaction aborts, so we do not need to worry about atomicity.)
+    veiled_deposit(recipient_addr, vc);
+}
+
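+The field accesses above (`transfer_proof.sigma_proof`, `transfer_proof.zkrp_new_balance`,
+`transfer_proof.zkrp_amount`) imply a `TransferProof` shaped roughly as follows. This is a sketch inferred
+from usage only; the inner sigma-proof type name and the struct abilities are assumptions, not the module's
+actual definition:
+
+struct TransferProof has drop {
+    sigma_proof: TransferSubproof,   // assumed type name for the sigma-protocol proof
+    zkrp_new_balance: RangeProof,    // Bulletproofs range proof for the sender's new balance
+    zkrp_amount: RangeProof,         // Bulletproofs range proof for the transferred amount
+}
+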
+ + + +
+ + + +## Function `verify_range_proofs` + +Verifies range proofs on the remaining balance of an account committed in comm_new_balance and, optionally, on +the transferred amount committed inside comm_amount. + + +
public fun verify_range_proofs(comm_new_balance: &ristretto255_pedersen::Commitment, zkrp_new_balance: &ristretto255_bulletproofs::RangeProof, comm_amount: &option::Option<ristretto255_pedersen::Commitment>, zkrp_amount: &option::Option<ristretto255_bulletproofs::RangeProof>)
+
+ + + +
+Implementation + + +
public fun verify_range_proofs(
+    comm_new_balance: &pedersen::Commitment,
+    zkrp_new_balance: &RangeProof,
+    comm_amount: &Option<pedersen::Commitment>,
+    zkrp_amount: &Option<RangeProof>
+) {
+    // Let `amount` denote the amount committed in `comm_amount` and `new_bal` the balance committed in `comm_new_balance`.
+    //
+    // This function checks if it is possible to withdraw a veiled `amount` from a veiled `bal`, obtaining a new
+    // veiled balance `new_bal = bal - amount`. This function is used to maintain a key safety invariant throughout
+    // the veiled coin code: i.e., that every account has `new_bal \in [0, 2^{32})`.
+    //
+    // This invariant is enforced as follows:
+    //
+    //  1. We assume (by the invariant) that `bal \in [0, 2^{32})`.
+    //
+    //  2. We verify a ZK range proof that `amount \in [0, 2^{32})`. Otherwise, a sender could set `amount = p-1`
+    //     where `p` is the order of the scalar field, which would give `new_bal = bal - (p-1) mod p = bal + 1`.
+    //     Therefore, a malicious spender could create coins out of thin air for themselves.
+    //
+    //  3. We verify a ZK range proof that `new_bal \in [0, 2^{32})`. Otherwise, a sender could set `amount = bal + 1`,
+    //     which would satisfy condition (2) from above but would give `new_bal = bal - (bal + 1) = -1`. Therefore,
+    //     a malicious spender could spend more coins than they have.
+    //
+    // Altogether, these checks ensure that `bal - amount >= 0` (as integers) and therefore that `bal >= amount`
+    // (again, as integers).
+    //
+    // When the caller of this function creates `comm_amount` from a public `u32` value, condition (2) from above
+    // is guaranteed to hold, so no range proof on the amount is necessary. This happens when withdrawing a public
+    // amount from a veiled balance via `unveil_to` or `unveil`.
+
+    // Checks that the remaining balance is >= 0; i.e., range condition (3)
+    assert!(
+        bulletproofs::verify_range_proof_pedersen(
+            comm_new_balance,
+            zkrp_new_balance,
+            MAX_BITS_IN_VEILED_COIN_VALUE, VEILED_COIN_BULLETPROOFS_DST
+        ),
+        error::out_of_range(ERANGE_PROOF_VERIFICATION_FAILED)
+    );
+
+    // Checks that the transferred amount is in range (when this amount did not originate from a public amount); i.e., range condition (2)
+    if (zkrp_amount.is_some()) {
+        assert!(
+            bulletproofs::verify_range_proof_pedersen(
+                comm_amount.borrow(),
+                zkrp_amount.borrow(),
+                MAX_BITS_IN_VEILED_COIN_VALUE, VEILED_COIN_BULLETPROOFS_DST
+            ),
+            error::out_of_range(ERANGE_PROOF_VERIFICATION_FAILED)
+        );
+    };
+}
+
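+For the public-withdrawal path mentioned in the comment above (`unveil` / `unveil_to`), the transferred
+amount originates from a public `u32`, so no range proof on it is needed and the caller passes empty options
+for the amount side. A sketch of such a call, with illustrative argument names:
+
+verify_range_proofs(
+    &comm_new_balance,
+    &zkrp_new_balance,
+    &std::option::none<pedersen::Commitment>(),
+    &std::option::none<RangeProof>());
+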
+ + + +
+ + + +## Function `get_resource_account_signer` + +Returns a signer for the resource account storing all the normal coins that have been veiled. + + +
fun get_resource_account_signer(): signer
+
+ + + +
+Implementation + + +
fun get_resource_account_signer(): signer acquires VeiledCoinMinter {
+    account::create_signer_with_capability(&borrow_global<VeiledCoinMinter>(@aptos_experimental).signer_cap)
+}
+
+ + + +
+ + + +## Function `veiled_mint_from_coin` + +Mints a veiled coin from a normal coin, shelving the normal coin into the resource account's coin store. + +**WARNING:** Fundamentally, there is no way to hide the value of the coin being minted here. + + +
fun veiled_mint_from_coin<CoinType>(c: coin::Coin<CoinType>): veiled_coin::VeiledCoin<CoinType>
+
+ + + +
+Implementation + + +
fun veiled_mint_from_coin<CoinType>(c: Coin<CoinType>): VeiledCoin<CoinType> acquires VeiledCoinMinter {
+    // If there is no `coin::CoinStore<CoinType>` in the resource account, create one.
+    let rsrc_acc_signer = get_resource_account_signer();
+    let rsrc_acc_addr = signer::address_of(&rsrc_acc_signer);
+    if (!coin::is_account_registered<CoinType>(rsrc_acc_addr)) {
+        coin::register<CoinType>(&rsrc_acc_signer);
+    };
+
+    // Move the normal coin into the coin store, so we can mint a veiled coin.
+    // (There is no other way to drop a normal coin, for safety reasons, so moving it into a coin store is
+    //  the only option.)
+    let value_u64 = coin::value(&c);
+    let value_u32 = clamp_u64_to_u32_amount(value_u64);
+
+    // Paranoid check: assert that the u64 coin value has only its middle 32 bits set (this should be the case
+    // because the caller should have withdrawn a u32 amount, but we enforce it here anyway).
+    assert!(cast_u32_to_u64_amount(value_u32) == value_u64, error::internal(EINTERNAL_ERROR));
+
+    // Deposit a normal coin into the resource account...
+    coin::deposit(rsrc_acc_addr, c);
+
+    // ...and mint a veiled coin, which is backed by the normal coin
+    VeiledCoin<CoinType> {
+        veiled_amount: helpers::public_amount_to_veiled_balance(value_u32)
+    }
+}
+
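+The `clamp_u64_to_u32_amount` / `cast_u32_to_u64_amount` helpers used above are defined elsewhere in this
+module. Assuming the "middle 32 bits" layout described in the comment (a veiled `u32` amount corresponding to
+the `u64` coin amount scaled by 2^16), they would look roughly like the following sketch; the `_sketch`
+suffix marks these as illustrative, not the module's actual definitions:
+
+// Sketch only, under the assumed bit layout described above.
+fun cast_u32_to_u64_amount_sketch(amount: u32): u64 {
+    (amount as u64) << 16
+}
+
+fun clamp_u64_to_u32_amount_sketch(amount: u64): u32 {
+    (((amount >> 16) & 0xffffffff) as u32)
+}
+
+Under this layout, the paranoid check above holds exactly when the low and high 16 bits of `value_u64` are
+zero, i.e. when the value round-trips through the clamp/cast pair unchanged.
+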
+ + + +
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/aptos-experimental/doc/veiled_coin_tests.md b/aptos-move/framework/aptos-experimental/doc/veiled_coin_tests.md new file mode 100644 index 0000000000000..e9253241e50a5 --- /dev/null +++ b/aptos-move/framework/aptos-experimental/doc/veiled_coin_tests.md @@ -0,0 +1,13 @@ + + + +# Module `0x1337::veiled_coin_tests` + +Tests for veiled coins. + +TODO: test that payments to self return successfully (ideally, they should do nothing) + + + + +
diff --git a/aptos-move/framework/aptos-experimental/doc_template/overview.md b/aptos-move/framework/aptos-experimental/doc_template/overview.md new file mode 100644 index 0000000000000..eeeffd865a70b --- /dev/null +++ b/aptos-move/framework/aptos-experimental/doc_template/overview.md @@ -0,0 +1,7 @@ +# Aptos Experimental Framework + +This is the reference documentation of the Aptos experimental framework. + +## Index + +> {{move-index}} diff --git a/aptos-move/framework/aptos-experimental/doc_template/references.md b/aptos-move/framework/aptos-experimental/doc_template/references.md new file mode 100644 index 0000000000000..ad4748ca54059 --- /dev/null +++ b/aptos-move/framework/aptos-experimental/doc_template/references.md @@ -0,0 +1 @@ +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/aptos-experimental/sources/benchmark_utils.move b/aptos-move/framework/aptos-experimental/sources/benchmark_utils.move new file mode 100644 index 0000000000000..5ff6796a12101 --- /dev/null +++ b/aptos-move/framework/aptos-experimental/sources/benchmark_utils.move @@ -0,0 +1,12 @@ +module aptos_experimental::benchmark_utils { + use aptos_framework::account; + use aptos_framework::aptos_account; + + /// Entry function that creates account resource, and funds the account. + /// This makes sure that transactions later don't need to create an account, + /// and so actual costs of entry functions can be more precisely measured. + entry fun transfer_and_create_account(source: &signer, to: address, amount: u64) { + account::create_account_if_does_not_exist(to); + aptos_account::transfer(source, to, amount); + } +} diff --git a/aptos-move/framework/aptos-experimental/sources/confidential_asset/confidential_asset.move b/aptos-move/framework/aptos-experimental/sources/confidential_asset/confidential_asset.move new file mode 100644 index 0000000000000..32c07f8c04f59 --- /dev/null +++ b/aptos-move/framework/aptos-experimental/sources/confidential_asset/confidential_asset.move @@ -0,0 +1,1141 @@ +/// This module implements the Confidential Asset (CA) Standard, a privacy-focused protocol for managing fungible assets (FA). +/// It enables private transfers by obfuscating token amounts while keeping sender and recipient addresses visible. +module aptos_experimental::confidential_asset { + use std::bcs; + use std::error; + use std::option::Option; + use std::signer; + use std::vector; + use aptos_std::ristretto255::Self; + use aptos_std::ristretto255_bulletproofs::Self as bulletproofs; + use aptos_std::string_utils; + use aptos_framework::chain_id; + use aptos_framework::coin; + use aptos_framework::event; + use aptos_framework::dispatchable_fungible_asset; + use aptos_framework::fungible_asset::{Metadata}; + use aptos_framework::object::{Self, ExtendRef, Object}; + use aptos_framework::primary_fungible_store; + use aptos_framework::system_addresses; + + use aptos_experimental::confidential_balance; + use aptos_experimental::confidential_proof::{ + Self, NormalizationProof, RotationProof, TransferProof, WithdrawalProof + }; + use aptos_experimental::ristretto255_twisted_elgamal as twisted_elgamal; + + #[test_only] + use aptos_std::ristretto255::Scalar; + + // + // Errors + // + + /// The range proof system does not support sufficient range. + const ERANGE_PROOF_SYSTEM_HAS_INSUFFICIENT_RANGE: u64 = 1; + + /// The confidential asset store has already been published for the given user-token pair. 
+ const ECA_STORE_ALREADY_PUBLISHED: u64 = 2; + + /// The confidential asset store has not been published for the given user-token pair. + const ECA_STORE_NOT_PUBLISHED: u64 = 3; + + /// The deserialization of the auditor EK failed. + const EAUDITOR_EK_DESERIALIZATION_FAILED: u64 = 4; + + /// The sender is not the registered auditor. + const ENOT_AUDITOR: u64 = 5; + + /// The provided auditors or auditor proofs are invalid. + const EINVALID_AUDITORS: u64 = 6; + + /// The confidential asset account is already frozen. + const EALREADY_FROZEN: u64 = 7; + + /// The confidential asset account is not frozen. + const ENOT_FROZEN: u64 = 8; + + /// The pending balance must be zero for this operation. + const ENOT_ZERO_BALANCE: u64 = 9; + + /// The operation requires the actual balance to be normalized. + const ENORMALIZATION_REQUIRED: u64 = 10; + + /// The balance is already normalized and cannot be normalized again. + const EALREADY_NORMALIZED: u64 = 11; + + /// The token is already allowed for confidential transfers. + const ETOKEN_ENABLED: u64 = 12; + + /// The token is not allowed for confidential transfers. + const ETOKEN_DISABLED: u64 = 13; + + /// The allow list is already enabled. + const EALLOW_LIST_ENABLED: u64 = 14; + + /// The allow list is already disabled. + const EALLOW_LIST_DISABLED: u64 = 15; + + /// An internal error occurred, indicating unexpected behavior. + const EINTERNAL_ERROR: u64 = 16; + + /// Sender and recipient amounts encrypt different transfer amounts + const EINVALID_SENDER_AMOUNT: u64 = 17; + + // + // Constants + // + + /// The maximum number of transactions can be aggregated on the pending balance before rollover is required. + const MAX_TRANSFERS_BEFORE_ROLLOVER: u64 = 65534; + + /// The mainnet chain ID. If the chain ID is 1, the allow list is enabled. + const MAINNET_CHAIN_ID: u8 = 1; + + // + // Structs + // + + /// The `confidential_asset` module stores a `ConfidentialAssetStore` object for each user-token pair. + struct ConfidentialAssetStore has key { + /// Indicates if the account is frozen. If `true`, transactions are temporarily disabled + /// for this account. This is particularly useful during key rotations, which require + /// two transactions: rolling over the pending balance to the actual balance and rotating + /// the encryption key. Freezing prevents the user from accepting additional payments + /// between these two transactions. + frozen: bool, + + /// A flag indicating whether the actual balance is normalized. A normalized balance + /// ensures that all chunks fit within the defined 16-bit bounds, preventing overflows. + normalized: bool, + + /// Tracks the maximum number of transactions the user can accept before normalization + /// is required. For example, if the user can accept up to 2^16 transactions and each + /// chunk has a 16-bit limit, the maximum chunk value before normalization would be + /// 2^16 * 2^16 = 2^32. Maintaining this counter is crucial because users must solve + /// a discrete logarithm problem of this size to decrypt their balances. + pending_counter: u64, + + /// Stores the user's pending balance, which is used for accepting incoming payments. + /// Represented as four 16-bit chunks (p0 + 2^16 * p1 + 2^32 * p2 + 2^48 * p3), that can grow up to 32 bits. + /// All payments are accepted into this pending balance, which users must roll over into the actual balance + /// to perform transactions like withdrawals or transfers. 
+ /// This separation helps protect against front-running attacks, where small incoming transfers could force + /// frequent regenerating of zk-proofs. + pending_balance: confidential_balance::CompressedConfidentialBalance, + + /// Represents the actual user balance, which is available for sending payments. + /// It consists of eight 16-bit chunks (p0 + 2^16 * p1 + ... + 2^112 * p8), supporting a 128-bit balance. + /// Users can decrypt this balance with their decryption keys and by solving a discrete logarithm problem. + actual_balance: confidential_balance::CompressedConfidentialBalance, + + /// The encryption key associated with the user's confidential asset account, different for each token. + ek: twisted_elgamal::CompressedPubkey, + } + + /// Represents the controller for the primary FA stores and `FAConfig` objects. + struct FAController has key { + /// Indicates whether the allow list is enabled. If `true`, only tokens from the allow list can be transferred. + /// This flag is managed by the governance module. + allow_list_enabled: bool, + + /// Used to derive a signer that owns all the FAs' primary stores and `FAConfig` objects. + extend_ref: ExtendRef + } + + /// Represents the configuration of a token. + struct FAConfig has key { + /// Indicates whether the token is allowed for confidential transfers. + /// If allow list is disabled, all tokens are allowed. + /// Can be toggled by the governance module. The withdrawals are always allowed. + allowed: bool, + + /// The auditor's public key for the token. If the auditor is not set, this field is `None`. + /// Otherwise, each confidential transfer must include the auditor as an additional party, + /// alongside the recipient, who has access to the decrypted transferred amount. + auditor_ek: Option, + } + + // + // Events + // + + #[event] + /// Emitted when tokens are brought into the protocol. + struct Deposited has drop, store { + from: address, + to: address, + amount: u64 + } + + #[event] + /// Emitted when tokens are brought out of the protocol. + struct Withdrawn has drop, store { + from: address, + to: address, + amount: u64 + } + + #[event] + /// Emitted when tokens are transferred within the protocol between users' confidential balances. + /// Note that a numeric amount is not included, as it is hidden. + struct Transferred has drop, store { + from: address, + to: address + } + + // + // Module initialization, done only once when this module is first published on the blockchain + // + + fun init_module(deployer: &signer) { + assert!( + bulletproofs::get_max_range_bits() >= confidential_proof::get_bulletproofs_num_bits(), + error::internal(ERANGE_PROOF_SYSTEM_HAS_INSUFFICIENT_RANGE) + ); + + let deployer_address = signer::address_of(deployer); + + let fa_controller_ctor_ref = &object::create_object(deployer_address); + + move_to(deployer, FAController { + allow_list_enabled: chain_id::get() == MAINNET_CHAIN_ID, + extend_ref: object::generate_extend_ref(fa_controller_ctor_ref), + }); + } + + // + // Entry functions + // + + /// Registers an account for a specified token. Users must register an account for each token they + /// intend to transact with. + /// + /// Users are also responsible for generating a Twisted ElGamal key pair on their side. 
+ public entry fun register( + sender: &signer, + token: Object, + ek: vector) acquires FAController, FAConfig + { + let ek = twisted_elgamal::new_pubkey_from_bytes(ek).extract(); + + register_internal(sender, token, ek); + } + + /// Brings tokens into the protocol, transferring the passed amount from the sender's primary FA store + /// to the pending balance of the recipient. + /// The initial confidential balance is publicly visible, as entering the protocol requires a normal transfer. + /// However, tokens within the protocol become obfuscated through confidential transfers, ensuring privacy in + /// subsequent transactions. + public entry fun deposit_to( + sender: &signer, + token: Object, + to: address, + amount: u64) acquires ConfidentialAssetStore, FAController, FAConfig + { + deposit_to_internal(sender, token, to, amount) + } + + /// The same as `deposit_to`, but the recipient is the sender. + public entry fun deposit( + sender: &signer, + token: Object, + amount: u64) acquires ConfidentialAssetStore, FAController, FAConfig + { + deposit_to_internal(sender, token, signer::address_of(sender), amount) + } + + /// The same as `deposit_to`, but converts coins to missing FA first. + public entry fun deposit_coins_to( + sender: &signer, + to: address, + amount: u64) acquires ConfidentialAssetStore, FAController, FAConfig + { + let token = ensure_sufficient_fa(sender, amount).extract(); + + deposit_to_internal(sender, token, to, amount) + } + + /// The same as `deposit`, but converts coins to missing FA first. + public entry fun deposit_coins( + sender: &signer, + amount: u64) acquires ConfidentialAssetStore, FAController, FAConfig + { + let token = ensure_sufficient_fa(sender, amount).extract(); + + deposit_to_internal(sender, token, signer::address_of(sender), amount) + } + + /// Brings tokens out of the protocol by transferring the specified amount from the sender's actual balance to + /// the recipient's primary FA store. + /// The withdrawn amount is publicly visible, as this process requires a normal transfer. + /// The sender provides their new normalized confidential balance, encrypted with fresh randomness to preserve privacy. + public entry fun withdraw_to( + sender: &signer, + token: Object, + to: address, + amount: u64, + new_balance: vector, + zkrp_new_balance: vector, + sigma_proof: vector) acquires ConfidentialAssetStore, FAController + { + let new_balance = confidential_balance::new_actual_balance_from_bytes(new_balance).extract(); + let proof = confidential_proof::deserialize_withdrawal_proof(sigma_proof, zkrp_new_balance).extract(); + + withdraw_to_internal(sender, token, to, amount, new_balance, proof); + + event::emit(Withdrawn { from: signer::address_of(sender), to, amount }); + } + + /// The same as `withdraw_to`, but the recipient is the sender. + public entry fun withdraw( + sender: &signer, + token: Object, + amount: u64, + new_balance: vector, + zkrp_new_balance: vector, + sigma_proof: vector) acquires ConfidentialAssetStore, FAController + { + withdraw_to( + sender, + token, + signer::address_of(sender), + amount, + new_balance, + zkrp_new_balance, + sigma_proof + ) + } + + /// Transfers tokens from the sender's actual balance to the recipient's pending balance. + /// The function hides the transferred amount while keeping the sender and recipient addresses visible. + /// The sender encrypts the transferred amount with the recipient's encryption key and the function updates the + /// recipient's confidential balance homomorphically. 
+ /// Additionally, the sender encrypts the transferred amount with the auditors' EKs, allowing auditors to decrypt + /// the it on their side. + /// The sender provides their new normalized confidential balance, encrypted with fresh randomness to preserve privacy. + /// Warning: If the auditor feature is enabled, the sender must include the auditor as the first element in the + /// `auditor_eks` vector. + public entry fun confidential_transfer( + sender: &signer, + token: Object, + to: address, + new_balance: vector, + sender_amount: vector, + recipient_amount: vector, + auditor_eks: vector, + auditor_amounts: vector, + zkrp_new_balance: vector, + zkrp_transfer_amount: vector, + sigma_proof: vector) acquires ConfidentialAssetStore, FAConfig, FAController + { + let new_balance = confidential_balance::new_actual_balance_from_bytes(new_balance).extract(); + let sender_amount = confidential_balance::new_pending_balance_from_bytes(sender_amount).extract(); + let recipient_amount = confidential_balance::new_pending_balance_from_bytes(recipient_amount).extract(); + let auditor_eks = deserialize_auditor_eks(auditor_eks).extract(); + let auditor_amounts = deserialize_auditor_amounts(auditor_amounts).extract(); + let proof = confidential_proof::deserialize_transfer_proof( + sigma_proof, + zkrp_new_balance, + zkrp_transfer_amount + ).extract(); + + confidential_transfer_internal( + sender, + token, + to, + new_balance, + sender_amount, + recipient_amount, + auditor_eks, + auditor_amounts, + proof + ) + } + + /// Rotates the encryption key for the user's confidential balance, updating it to a new encryption key. + /// The function ensures that the pending balance is zero before the key rotation, requiring the sender to + /// call `rollover_pending_balance_and_freeze` beforehand if necessary. + /// The sender provides their new normalized confidential balance, encrypted with the new encryption key and fresh randomness + /// to preserve privacy. + public entry fun rotate_encryption_key( + sender: &signer, + token: Object, + new_ek: vector, + new_balance: vector, + zkrp_new_balance: vector, + sigma_proof: vector) acquires ConfidentialAssetStore + { + let new_ek = twisted_elgamal::new_pubkey_from_bytes(new_ek).extract(); + let new_balance = confidential_balance::new_actual_balance_from_bytes(new_balance).extract(); + let proof = confidential_proof::deserialize_rotation_proof(sigma_proof, zkrp_new_balance).extract(); + + rotate_encryption_key_internal(sender, token, new_ek, new_balance, proof); + } + + /// Adjusts each chunk to fit into defined 16-bit bounds to prevent overflows. + /// Most functions perform implicit normalization by accepting a new normalized confidential balance as a parameter. + /// However, explicit normalization is required before rolling over the pending balance, as multiple rolls may cause + /// chunk overflows. + /// The sender provides their new normalized confidential balance, encrypted with fresh randomness to preserve privacy. + public entry fun normalize( + sender: &signer, + token: Object, + new_balance: vector, + zkrp_new_balance: vector, + sigma_proof: vector) acquires ConfidentialAssetStore + { + let new_balance = confidential_balance::new_actual_balance_from_bytes(new_balance).extract(); + let proof = confidential_proof::deserialize_normalization_proof(sigma_proof, zkrp_new_balance).extract(); + + normalize_internal(sender, token, new_balance, proof); + } + + /// Freezes the confidential account for the specified token, disabling all incoming transactions. 
+ public entry fun freeze_token(sender: &signer, token: Object) acquires ConfidentialAssetStore { + freeze_token_internal(sender, token); + } + + /// Unfreezes the confidential account for the specified token, re-enabling incoming transactions. + public entry fun unfreeze_token(sender: &signer, token: Object) acquires ConfidentialAssetStore { + unfreeze_token_internal(sender, token); + } + + /// Adds the pending balance to the actual balance for the specified token, resetting the pending balance to zero. + /// This operation is necessary to use tokens from the pending balance for outgoing transactions. + public entry fun rollover_pending_balance( + sender: &signer, + token: Object) acquires ConfidentialAssetStore + { + rollover_pending_balance_internal(sender, token); + } + + /// Before calling `rotate_encryption_key`, we need to rollover the pending balance and freeze the token to prevent + /// any new payments being come. + public entry fun rollover_pending_balance_and_freeze( + sender: &signer, + token: Object) acquires ConfidentialAssetStore + { + rollover_pending_balance(sender, token); + freeze_token(sender, token); + } + + /// After rotating the encryption key, we may want to unfreeze the token to allow payments. + /// This function facilitates making both calls in a single transaction. + public entry fun rotate_encryption_key_and_unfreeze( + sender: &signer, + token: Object, + new_ek: vector, + new_confidential_balance: vector, + zkrp_new_balance: vector, + rotate_proof: vector) acquires ConfidentialAssetStore + { + rotate_encryption_key(sender, token, new_ek, new_confidential_balance, zkrp_new_balance, rotate_proof); + unfreeze_token(sender, token); + } + + // + // Public governance functions + // + + /// Enables the allow list, restricting confidential transfers to tokens on the allow list. + public fun enable_allow_list(aptos_framework: &signer) acquires FAController { + system_addresses::assert_aptos_framework(aptos_framework); + + let fa_controller = borrow_global_mut(@aptos_experimental); + + assert!(!fa_controller.allow_list_enabled, error::invalid_state(EALLOW_LIST_ENABLED)); + + fa_controller.allow_list_enabled = true; + } + + /// Disables the allow list, allowing confidential transfers for all tokens. + public fun disable_allow_list(aptos_framework: &signer) acquires FAController { + system_addresses::assert_aptos_framework(aptos_framework); + + let fa_controller = borrow_global_mut(@aptos_experimental); + + assert!(fa_controller.allow_list_enabled, error::invalid_state(EALLOW_LIST_DISABLED)); + + fa_controller.allow_list_enabled = false; + } + + /// Enables confidential transfers for the specified token. + public fun enable_token(aptos_framework: &signer, token: Object) acquires FAConfig, FAController { + system_addresses::assert_aptos_framework(aptos_framework); + + let fa_config = borrow_global_mut(ensure_fa_config_exists(token)); + + assert!(!fa_config.allowed, error::invalid_state(ETOKEN_ENABLED)); + + fa_config.allowed = true; + } + + /// Disables confidential transfers for the specified token. + public fun disable_token(aptos_framework: &signer, token: Object) acquires FAConfig, FAController { + system_addresses::assert_aptos_framework(aptos_framework); + + let fa_config = borrow_global_mut(ensure_fa_config_exists(token)); + + assert!(fa_config.allowed, error::invalid_state(ETOKEN_DISABLED)); + + fa_config.allowed = false; + } + + /// Sets the auditor's public key for the specified token. 
+ public fun set_auditor( + aptos_framework: &signer, + token: Object, + new_auditor_ek: vector) acquires FAConfig, FAController + { + system_addresses::assert_aptos_framework(aptos_framework); + + let fa_config = borrow_global_mut(ensure_fa_config_exists(token)); + + fa_config.auditor_ek = if (new_auditor_ek.length() == 0) { + std::option::none() + } else { + let new_auditor_ek = twisted_elgamal::new_pubkey_from_bytes(new_auditor_ek); + assert!(new_auditor_ek.is_some(), error::invalid_argument(EAUDITOR_EK_DESERIALIZATION_FAILED)); + new_auditor_ek + }; + } + + // + // Public view functions + // + + #[view] + /// Checks if the user has a confidential asset store for the specified token. + public fun has_confidential_asset_store(user: address, token: Object): bool { + exists(get_user_address(user, token)) + } + + #[view] + /// Checks if the token is allowed for confidential transfers. + public fun is_token_allowed(token: Object): bool acquires FAController, FAConfig { + if (!is_allow_list_enabled()) { + return true + }; + + let fa_config_address = get_fa_config_address(token); + + if (!exists(fa_config_address)) { + return false + }; + + borrow_global(fa_config_address).allowed + } + + #[view] + /// Checks if the allow list is enabled. + /// If the allow list is enabled, only tokens from the allow list can be transferred. + /// Otherwise, all tokens are allowed. + public fun is_allow_list_enabled(): bool acquires FAController { + borrow_global(@aptos_experimental).allow_list_enabled + } + + #[view] + /// Returns the pending balance of the user for the specified token. + public fun pending_balance( + owner: address, + token: Object): confidential_balance::CompressedConfidentialBalance acquires ConfidentialAssetStore + { + assert!(has_confidential_asset_store(owner, token), error::not_found(ECA_STORE_NOT_PUBLISHED)); + + let ca_store = borrow_global(get_user_address(owner, token)); + + ca_store.pending_balance + } + + #[view] + /// Returns the actual balance of the user for the specified token. + public fun actual_balance( + owner: address, + token: Object): confidential_balance::CompressedConfidentialBalance acquires ConfidentialAssetStore + { + assert!(has_confidential_asset_store(owner, token), error::not_found(ECA_STORE_NOT_PUBLISHED)); + + let ca_store = borrow_global(get_user_address(owner, token)); + + ca_store.actual_balance + } + + #[view] + /// Returns the encryption key (EK) of the user for the specified token. + public fun encryption_key( + user: address, + token: Object): twisted_elgamal::CompressedPubkey acquires ConfidentialAssetStore + { + assert!(has_confidential_asset_store(user, token), error::not_found(ECA_STORE_NOT_PUBLISHED)); + + borrow_global_mut(get_user_address(user, token)).ek + } + + #[view] + /// Checks if the user's actual balance is normalized for the specified token. + public fun is_normalized(user: address, token: Object): bool acquires ConfidentialAssetStore { + assert!(has_confidential_asset_store(user, token), error::not_found(ECA_STORE_NOT_PUBLISHED)); + + borrow_global(get_user_address(user, token)).normalized + } + + #[view] + /// Checks if the user's confidential asset store is frozen for the specified token. + public fun is_frozen(user: address, token: Object): bool acquires ConfidentialAssetStore { + assert!(has_confidential_asset_store(user, token), error::not_found(ECA_STORE_NOT_PUBLISHED)); + + borrow_global(get_user_address(user, token)).frozen + } + + #[view] + /// Returns the asset-specific auditor's encryption key. 
+ /// If the auditing feature is disabled for the token, the encryption key is set to `None`. + public fun get_auditor( + token: Object): Option acquires FAConfig, FAController + { + let fa_config_address = get_fa_config_address(token); + + if (!is_allow_list_enabled() && !exists(fa_config_address)) { + return std::option::none(); + }; + + borrow_global(fa_config_address).auditor_ek + } + + #[view] + /// Returns the circulating supply of the confidential asset. + public fun confidential_asset_balance(token: Object): u64 acquires FAController { + let fa_store_address = get_fa_store_address(); + assert!(primary_fungible_store::primary_store_exists(fa_store_address, token), EINTERNAL_ERROR); + + primary_fungible_store::balance(fa_store_address, token) + } + + // + // Public functions that correspond to the entry functions and don't require serializtion of the input data. + // These function can be useful for external contracts that want to integrate with the Confidential Asset protocol. + // + + /// Implementation of the `register` entry function. + public fun register_internal( + sender: &signer, + token: Object, + ek: twisted_elgamal::CompressedPubkey) acquires FAController, FAConfig + { + assert!(is_token_allowed(token), error::invalid_argument(ETOKEN_DISABLED)); + + let user = signer::address_of(sender); + + assert!(!has_confidential_asset_store(user, token), error::already_exists(ECA_STORE_ALREADY_PUBLISHED)); + + let ca_store = ConfidentialAssetStore { + frozen: false, + normalized: true, + pending_counter: 0, + pending_balance: confidential_balance::new_compressed_pending_balance_no_randomness(), + actual_balance: confidential_balance::new_compressed_actual_balance_no_randomness(), + ek, + }; + + move_to(&get_user_signer(sender, token), ca_store); + } + + /// Implementation of the `deposit_to` entry function. + public fun deposit_to_internal( + sender: &signer, + token: Object, + to: address, + amount: u64) acquires ConfidentialAssetStore, FAController, FAConfig + { + assert!(is_token_allowed(token), error::invalid_argument(ETOKEN_DISABLED)); + assert!(!is_frozen(to, token), error::invalid_state(EALREADY_FROZEN)); + + let from = signer::address_of(sender); + + let sender_fa_store = primary_fungible_store::ensure_primary_store_exists(from, token); + let ca_fa_store = primary_fungible_store::ensure_primary_store_exists(get_fa_store_address(), token); + + dispatchable_fungible_asset::transfer(sender, sender_fa_store, ca_fa_store, amount); + + let ca_store = borrow_global_mut(get_user_address(to, token)); + let pending_balance = confidential_balance::decompress_balance(&ca_store.pending_balance); + + confidential_balance::add_balances_mut( + &mut pending_balance, + &confidential_balance::new_pending_balance_u64_no_randonmess(amount) + ); + + ca_store.pending_balance = confidential_balance::compress_balance(&pending_balance); + + assert!( + ca_store.pending_counter < MAX_TRANSFERS_BEFORE_ROLLOVER, + error::invalid_argument(EINTERNAL_ERROR) + ); + + ca_store.pending_counter += 1; + + event::emit(Deposited { from, to, amount }); + } + + /// Implementation of the `withdraw_to` entry function. + /// Withdrawals are always allowed, regardless of the token allow status. 
+ public fun withdraw_to_internal( + sender: &signer, + token: Object, + to: address, + amount: u64, + new_balance: confidential_balance::ConfidentialBalance, + proof: WithdrawalProof) acquires ConfidentialAssetStore, FAController + { + let from = signer::address_of(sender); + + let sender_ek = encryption_key(from, token); + + let ca_store = borrow_global_mut(get_user_address(from, token)); + let current_balance = confidential_balance::decompress_balance(&ca_store.actual_balance); + + confidential_proof::verify_withdrawal_proof(&sender_ek, amount, ¤t_balance, &new_balance, &proof); + + ca_store.normalized = true; + ca_store.actual_balance = confidential_balance::compress_balance(&new_balance); + + primary_fungible_store::transfer(&get_fa_store_signer(), token, to, amount); + } + + /// Implementation of the `confidential_transfer` entry function. + public fun confidential_transfer_internal( + sender: &signer, + token: Object, + to: address, + new_balance: confidential_balance::ConfidentialBalance, + sender_amount: confidential_balance::ConfidentialBalance, + recipient_amount: confidential_balance::ConfidentialBalance, + auditor_eks: vector, + auditor_amounts: vector, + proof: TransferProof) acquires ConfidentialAssetStore, FAConfig, FAController + { + assert!(is_token_allowed(token), error::invalid_argument(ETOKEN_DISABLED)); + assert!(!is_frozen(to, token), error::invalid_state(EALREADY_FROZEN)); + assert!( + validate_auditors(token, &recipient_amount, &auditor_eks, &auditor_amounts, &proof), + error::invalid_argument(EINVALID_AUDITORS) + ); + assert!( + confidential_balance::balance_c_equals(&sender_amount, &recipient_amount), + error::invalid_argument(EINVALID_SENDER_AMOUNT) + ); + + let from = signer::address_of(sender); + + let sender_ek = encryption_key(from, token); + let recipient_ek = encryption_key(to, token); + + let sender_ca_store = borrow_global_mut(get_user_address(from, token)); + + let sender_current_actual_balance = confidential_balance::decompress_balance( + &sender_ca_store.actual_balance + ); + + confidential_proof::verify_transfer_proof( + &sender_ek, + &recipient_ek, + &sender_current_actual_balance, + &new_balance, + &sender_amount, + &recipient_amount, + &auditor_eks, + &auditor_amounts, + &proof); + + sender_ca_store.normalized = true; + sender_ca_store.actual_balance = confidential_balance::compress_balance(&new_balance); + + // Cannot create multiple mutable references to the same type, so we need to drop it + let ConfidentialAssetStore { .. } = sender_ca_store; + + let recipient_ca_store = borrow_global_mut(get_user_address(to, token)); + + assert!( + recipient_ca_store.pending_counter < MAX_TRANSFERS_BEFORE_ROLLOVER, + error::invalid_argument(EINTERNAL_ERROR) + ); + + let recipient_pending_balance = confidential_balance::decompress_balance( + &recipient_ca_store.pending_balance + ); + confidential_balance::add_balances_mut(&mut recipient_pending_balance, &recipient_amount); + + recipient_ca_store.pending_counter += 1; + recipient_ca_store.pending_balance = confidential_balance::compress_balance(&recipient_pending_balance); + + event::emit(Transferred { from, to }); + } + + /// Implementation of the `rotate_encryption_key` entry function. 
+ public fun rotate_encryption_key_internal( + sender: &signer, + token: Object, + new_ek: twisted_elgamal::CompressedPubkey, + new_balance: confidential_balance::ConfidentialBalance, + proof: RotationProof) acquires ConfidentialAssetStore + { + let user = signer::address_of(sender); + let current_ek = encryption_key(user, token); + + let ca_store = borrow_global_mut(get_user_address(user, token)); + + let pending_balance = confidential_balance::decompress_balance(&ca_store.pending_balance); + + // We need to ensure that the pending balance is zero before rotating the key. + // To guarantee this, the user must call `rollover_pending_balance_and_freeze` beforehand. + assert!(confidential_balance::is_zero_balance(&pending_balance), error::invalid_state(ENOT_ZERO_BALANCE)); + + let current_balance = confidential_balance::decompress_balance(&ca_store.actual_balance); + + confidential_proof::verify_rotation_proof(¤t_ek, &new_ek, ¤t_balance, &new_balance, &proof); + + ca_store.ek = new_ek; + // We don't need to update the pending balance here, as it has been asserted to be zero. + ca_store.actual_balance = confidential_balance::compress_balance(&new_balance); + ca_store.normalized = true; + } + + /// Implementation of the `normalize` entry function. + public fun normalize_internal( + sender: &signer, + token: Object, + new_balance: confidential_balance::ConfidentialBalance, + proof: NormalizationProof) acquires ConfidentialAssetStore + { + let user = signer::address_of(sender); + let sender_ek = encryption_key(user, token); + + let ca_store = borrow_global_mut(get_user_address(user, token)); + + assert!(!ca_store.normalized, error::invalid_state(EALREADY_NORMALIZED)); + + let current_balance = confidential_balance::decompress_balance(&ca_store.actual_balance); + + confidential_proof::verify_normalization_proof(&sender_ek, ¤t_balance, &new_balance, &proof); + + ca_store.actual_balance = confidential_balance::compress_balance(&new_balance); + ca_store.normalized = true; + } + + /// Implementation of the `rollover_pending_balance` entry function. + public fun rollover_pending_balance_internal( + sender: &signer, + token: Object) acquires ConfidentialAssetStore + { + let user = signer::address_of(sender); + + assert!(has_confidential_asset_store(user, token), error::not_found(ECA_STORE_NOT_PUBLISHED)); + + let ca_store = borrow_global_mut(get_user_address(user, token)); + + assert!(ca_store.normalized, error::invalid_state(ENORMALIZATION_REQUIRED)); + + let actual_balance = confidential_balance::decompress_balance(&ca_store.actual_balance); + let pending_balance = confidential_balance::decompress_balance(&ca_store.pending_balance); + + confidential_balance::add_balances_mut(&mut actual_balance, &pending_balance); + + ca_store.normalized = false; + ca_store.pending_counter = 0; + ca_store.actual_balance = confidential_balance::compress_balance(&actual_balance); + ca_store.pending_balance = confidential_balance::new_compressed_pending_balance_no_randomness(); + } + + /// Implementation of the `freeze_token` entry function. + public fun freeze_token_internal( + sender: &signer, + token: Object) acquires ConfidentialAssetStore + { + let user = signer::address_of(sender); + + assert!(has_confidential_asset_store(user, token), error::not_found(ECA_STORE_NOT_PUBLISHED)); + + let ca_store = borrow_global_mut(get_user_address(user, token)); + + assert!(!ca_store.frozen, error::invalid_state(EALREADY_FROZEN)); + + ca_store.frozen = true; + } + + /// Implementation of the `unfreeze_token` entry function. 
+ public fun unfreeze_token_internal( + sender: &signer, + token: Object) acquires ConfidentialAssetStore + { + let user = signer::address_of(sender); + + assert!(has_confidential_asset_store(user, token), error::not_found(ECA_STORE_NOT_PUBLISHED)); + + let ca_store = borrow_global_mut(get_user_address(user, token)); + + assert!(ca_store.frozen, error::invalid_state(ENOT_FROZEN)); + + ca_store.frozen = false; + } + + // + // Private functions. + // + + /// Ensures that the `FAConfig` object exists for the specified token. + /// If the object does not exist, creates it. + /// Used only for internal purposes. + fun ensure_fa_config_exists(token: Object): address acquires FAController { + let fa_config_address = get_fa_config_address(token); + + if (!exists(fa_config_address)) { + let fa_config_singer = get_fa_config_signer(token); + + move_to(&fa_config_singer, FAConfig { + allowed: false, + auditor_ek: std::option::none(), + }); + }; + + fa_config_address + } + + /// Returns an object for handling all the FA primary stores, and returns a signer for it. + fun get_fa_store_signer(): signer acquires FAController { + object::generate_signer_for_extending(&borrow_global(@aptos_experimental).extend_ref) + } + + /// Returns the address that handles all the FA primary stores. + fun get_fa_store_address(): address acquires FAController { + object::address_from_extend_ref(&borrow_global(@aptos_experimental).extend_ref) + } + + /// Returns an object for handling the `ConfidentialAssetStore` and returns a signer for it. + fun get_user_signer(user: &signer, token: Object): signer { + let user_ctor = &object::create_named_object(user, construct_user_seed(token)); + + object::generate_signer(user_ctor) + } + + /// Returns the address that handles the user's `ConfidentialAssetStore` object for the specified user and token. + fun get_user_address(user: address, token: Object): address { + object::create_object_address(&user, construct_user_seed(token)) + } + + /// Returns an object for handling the `FAConfig`, and returns a signer for it. + fun get_fa_config_signer(token: Object): signer acquires FAController { + let fa_ext = &borrow_global(@aptos_experimental).extend_ref; + let fa_ext_signer = object::generate_signer_for_extending(fa_ext); + + let fa_ctor = &object::create_named_object(&fa_ext_signer, construct_fa_seed(token)); + + object::generate_signer(fa_ctor) + } + + /// Returns the address that handles primary FA store and `FAConfig` objects for the specified token. + fun get_fa_config_address(token: Object): address acquires FAController { + let fa_ext = &borrow_global(@aptos_experimental).extend_ref; + let fa_ext_address = object::address_from_extend_ref(fa_ext); + + object::create_object_address(&fa_ext_address, construct_fa_seed(token)) + } + + /// Constructs a unique seed for the user's `ConfidentialAssetStore` object. + /// As all the `ConfidentialAssetStore`'s have the same type, we need to differentiate them by the seed. + fun construct_user_seed(token: Object): vector { + bcs::to_bytes( + &string_utils::format2( + &b"confidential_asset::{}::token::{}::user", + @aptos_experimental, + object::object_address(&token) + ) + ) + } + + /// Constructs a unique seed for the FA's `FAConfig` object. + /// As all the `FAConfig`'s have the same type, we need to differentiate them by the seed. 
+ fun construct_fa_seed(token: Object): vector { + bcs::to_bytes( + &string_utils::format2( + &b"confidential_asset::{}::token::{}::fa", + @aptos_experimental, + object::object_address(&token) + ) + ) + } + + /// Validates that the auditor-related fields in the confidential transfer are correct. + /// Returns `false` if the transfer amount is not the same as the auditor amounts. + /// Returns `false` if the number of auditors in the transfer proof and auditor lists do not match. + /// Returns `false` if the first auditor in the list and the asset-specific auditor do not match. + /// Note: If the asset-specific auditor is not set, the validation is successful for any list of auditors. + /// Otherwise, returns `true`. + fun validate_auditors( + token: Object, + transfer_amount: &confidential_balance::ConfidentialBalance, + auditor_eks: &vector, + auditor_amounts: &vector, + proof: &TransferProof): bool acquires FAConfig, FAController + { + if ( + !auditor_amounts.all(|auditor_amount| { + confidential_balance::balance_c_equals(transfer_amount, auditor_amount) + }) + ) { + return false + }; + + if ( + auditor_eks.length() != auditor_amounts.length() || + auditor_eks.length() != confidential_proof::auditors_count_in_transfer_proof(proof) + ) { + return false + }; + + let asset_auditor_ek = get_auditor(token); + if (asset_auditor_ek.is_none()) { + return true + }; + + if (auditor_eks.length() == 0) { + return false + }; + + let asset_auditor_ek = twisted_elgamal::pubkey_to_point(&asset_auditor_ek.extract()); + let first_auditor_ek = twisted_elgamal::pubkey_to_point(&auditor_eks[0]); + + ristretto255::point_equals(&asset_auditor_ek, &first_auditor_ek) + } + + /// Deserializes the auditor EKs from a byte array. + /// Returns `Some(vector)` if the deserialization is successful, otherwise `None`. + fun deserialize_auditor_eks( + auditor_eks_bytes: vector): Option> + { + if (auditor_eks_bytes.length() % 32 != 0) { + return std::option::none() + }; + + let auditors_count = auditor_eks_bytes.length() / 32; + + let auditor_eks = vector::range(0, auditors_count).map(|i| { + twisted_elgamal::new_pubkey_from_bytes(auditor_eks_bytes.slice(i * 32, (i + 1) * 32)) + }); + + if (auditor_eks.any(|ek| ek.is_none())) { + return std::option::none() + }; + + std::option::some(auditor_eks.map(|ek| ek.extract())) + } + + /// Deserializes the auditor amounts from a byte array. + /// Returns `Some(vector)` if the deserialization is successful, otherwise `None`. + fun deserialize_auditor_amounts( + auditor_amounts_bytes: vector): Option> + { + if (auditor_amounts_bytes.length() % 256 != 0) { + return std::option::none() + }; + + let auditors_count = auditor_amounts_bytes.length() / 256; + + let auditor_amounts = vector::range(0, auditors_count).map(|i| { + confidential_balance::new_pending_balance_from_bytes(auditor_amounts_bytes.slice(i * 256, (i + 1) * 256)) + }); + + if (auditor_amounts.any(|ek| ek.is_none())) { + return std::option::none() + }; + + std::option::some(auditor_amounts.map(|balance| balance.extract())) + } + + /// Converts coins to missing FA. + /// Returns `Some(Object)` if user has a suffucient amount of FA to proceed, otherwise `None`. 
+ fun ensure_sufficient_fa(sender: &signer, amount: u64): Option> { + let user = signer::address_of(sender); + let fa = coin::paired_metadata(); + + if (fa.is_none()) { + return fa; + }; + + let fa_balance = primary_fungible_store::balance(user, *fa.borrow()); + + if (fa_balance >= amount) { + return fa; + }; + + if (coin::balance(user) < amount) { + return std::option::none(); + }; + + let coin_amount = coin::withdraw(sender, amount - fa_balance); + let fa_amount = coin::coin_to_fungible_asset(coin_amount); + + primary_fungible_store::deposit(user, fa_amount); + + fa + } + + // + // Test-only functions + // + + #[test_only] + public fun init_module_for_testing(deployer: &signer) { + init_module(deployer) + } + + #[test_only] + public fun verify_pending_balance( + user: address, + token: Object, + user_dk: &Scalar, + amount: u64): bool acquires ConfidentialAssetStore + { + let ca_store = borrow_global(get_user_address(user, token)); + let pending_balance = confidential_balance::decompress_balance(&ca_store.pending_balance); + + confidential_balance::verify_pending_balance(&pending_balance, user_dk, amount) + } + + #[test_only] + public fun verify_actual_balance( + user: address, + token: Object, + user_dk: &Scalar, + amount: u128): bool acquires ConfidentialAssetStore + { + let ca_store = borrow_global(get_user_address(user, token)); + let actual_balance = confidential_balance::decompress_balance(&ca_store.actual_balance); + + confidential_balance::verify_actual_balance(&actual_balance, user_dk, amount) + } + + #[test_only] + public fun serialize_auditor_eks(auditor_eks: &vector): vector { + let auditor_eks_bytes = vector[]; + + auditor_eks.for_each_ref(|auditor| { + auditor_eks_bytes.append(twisted_elgamal::pubkey_to_bytes(auditor)); + }); + + auditor_eks_bytes + } + + #[test_only] + public fun serialize_auditor_amounts( + auditor_amounts: &vector + ): vector { + let auditor_amounts_bytes = vector[]; + + auditor_amounts.for_each_ref(|balance| { + auditor_amounts_bytes.append(confidential_balance::balance_to_bytes(balance)); + }); + + auditor_amounts_bytes + } +} diff --git a/aptos-move/framework/aptos-experimental/sources/confidential_asset/confidential_asset.spec.move b/aptos-move/framework/aptos-experimental/sources/confidential_asset/confidential_asset.spec.move new file mode 100644 index 0000000000000..8ac5d79cbd9db --- /dev/null +++ b/aptos-move/framework/aptos-experimental/sources/confidential_asset/confidential_asset.spec.move @@ -0,0 +1,2 @@ +spec aptos_experimental::confidential_asset { +} diff --git a/aptos-move/framework/aptos-experimental/sources/confidential_asset/confidential_balance.move b/aptos-move/framework/aptos-experimental/sources/confidential_asset/confidential_balance.move new file mode 100644 index 0000000000000..32e847ce64275 --- /dev/null +++ b/aptos-move/framework/aptos-experimental/sources/confidential_asset/confidential_balance.move @@ -0,0 +1,386 @@ +/// This module implements a Confidential Balance abstraction, built on top of Twisted ElGamal encryption, +/// over the Ristretto255 curve. +/// +/// The Confidential Balance encapsulates encrypted representations of a balance, split into chunks and stored as pairs of +/// ciphertext components `(C_i, D_i)` under basepoints `G` and `H` and an encryption key `P = dk^(-1) * H`, where `dk` +/// is the corresponding decryption key. 
Each pair represents an encrypted value `a_i` - the `i`-th 16-bit portion of +/// the total encrypted amount - and its associated randomness `r_i`, such that `C_i = a_i * G + r_i * H` and `D_i = r_i * P`. +/// +/// The module supports two types of balances: +/// - Pending balances are represented by four ciphertext pairs `(C_i, D_i), i = 1..4`, suitable for 64-bit values. +/// - Actual balances are represented by eight ciphertext pairs `(C_i, D_i), i = 1..8`, capable of handling 128-bit values. +/// +/// This implementation leverages the homomorphic properties of Twisted ElGamal encryption to allow arithmetic operations +/// directly on encrypted data. +module aptos_experimental::confidential_balance { + use std::error; + use std::option::{Self, Option}; + use std::vector; + use aptos_std::ristretto255::{Self, RistrettoPoint, Scalar}; + + use aptos_experimental::ristretto255_twisted_elgamal as twisted_elgamal; + + // + // Errors + // + + /// An internal error occurred, indicating unexpected behavior. + const EINTERNAL_ERROR: u64 = 1; + + // + // Contants + // + + /// The number of chunks in a pending balance. + const PENDING_BALANCE_CHUNKS: u64 = 4; + /// The number of chunks in an actual balance. + const ACTUAL_BALANCE_CHUNKS: u64 = 8; + /// The number of bits in a single chunk. + const CHUNK_SIZE_BITS: u64 = 16; + + // + // Structs + // + + /// Represents a compressed confidential balance, where each chunk is a compressed Twisted ElGamal ciphertext. + struct CompressedConfidentialBalance has store, drop, copy { + chunks: vector, + } + + /// Represents a confidential balance, where each chunk is a Twisted ElGamal ciphertext. + struct ConfidentialBalance has drop { + chunks: vector, + } + + // + // Public functions + // + + /// Creates a new zero pending balance, where each chunk is set to zero Twisted ElGamal ciphertext. + public fun new_pending_balance_no_randomness(): ConfidentialBalance { + ConfidentialBalance { + chunks: vector::range(0, PENDING_BALANCE_CHUNKS).map(|_| { + twisted_elgamal::ciphertext_from_points(ristretto255::point_identity(), ristretto255::point_identity()) + }) + } + } + + /// Creates a new zero actual balance, where each chunk is set to zero Twisted ElGamal ciphertext. + public fun new_actual_balance_no_randomness(): ConfidentialBalance { + ConfidentialBalance { + chunks: vector::range(0, ACTUAL_BALANCE_CHUNKS).map(|_| { + twisted_elgamal::ciphertext_from_points(ristretto255::point_identity(), ristretto255::point_identity()) + }) + } + } + + /// Creates a new compressed zero pending balance, where each chunk is set to compressed zero Twisted ElGamal ciphertext. + public fun new_compressed_pending_balance_no_randomness(): CompressedConfidentialBalance { + CompressedConfidentialBalance { + chunks: vector::range(0, PENDING_BALANCE_CHUNKS).map(|_| { + twisted_elgamal::ciphertext_from_compressed_points( + ristretto255::point_identity_compressed(), ristretto255::point_identity_compressed()) + }) + } + } + + /// Creates a new compressed zero actual balance, where each chunk is set to compressed zero Twisted ElGamal ciphertext. 
+ public fun new_compressed_actual_balance_no_randomness(): CompressedConfidentialBalance { + CompressedConfidentialBalance { + chunks: vector::range(0, ACTUAL_BALANCE_CHUNKS).map(|_| { + twisted_elgamal::ciphertext_from_compressed_points( + ristretto255::point_identity_compressed(), ristretto255::point_identity_compressed()) + }) + } + } + + /// Creates a new pending balance from a 64-bit amount with no randomness, splitting the amount into four 16-bit chunks. + public fun new_pending_balance_u64_no_randonmess(amount: u64): ConfidentialBalance { + ConfidentialBalance { + chunks: split_into_chunks_u64(amount).map(|chunk| { + twisted_elgamal::new_ciphertext_no_randomness(&chunk) + }) + } + } + + /// Creates a new pending balance from a serialized byte array representation. + /// Returns `Some(ConfidentialBalance)` if deserialization succeeds, otherwise `None`. + public fun new_pending_balance_from_bytes(bytes: vector): Option { + if (bytes.length() != 64 * PENDING_BALANCE_CHUNKS) { + return std::option::none() + }; + + let chunks = vector::range(0, PENDING_BALANCE_CHUNKS).map(|i| { + twisted_elgamal::new_ciphertext_from_bytes(bytes.slice(i * 64, (i + 1) * 64)) + }); + + if (chunks.any(|chunk| chunk.is_none())) { + return std::option::none() + }; + + option::some(ConfidentialBalance { + chunks: chunks.map(|chunk| chunk.extract()) + }) + } + + /// Creates a new actual balance from a serialized byte array representation. + /// Returns `Some(ConfidentialBalance)` if deserialization succeeds, otherwise `None`. + public fun new_actual_balance_from_bytes(bytes: vector): Option { + if (bytes.length() != 64 * ACTUAL_BALANCE_CHUNKS) { + return std::option::none() + }; + + let chunks = vector::range(0, ACTUAL_BALANCE_CHUNKS).map(|i| { + twisted_elgamal::new_ciphertext_from_bytes(bytes.slice(i * 64, (i + 1) * 64)) + }); + + if (chunks.any(|chunk| chunk.is_none())) { + return std::option::none() + }; + + option::some(ConfidentialBalance { + chunks: chunks.map(|chunk| chunk.extract()) + }) + } + + /// Compresses a confidential balance into its `CompressedConfidentialBalance` representation. + public fun compress_balance(balance: &ConfidentialBalance): CompressedConfidentialBalance { + CompressedConfidentialBalance { + chunks: balance.chunks.map_ref(|ciphertext| twisted_elgamal::compress_ciphertext(ciphertext)) + } + } + + /// Decompresses a compressed confidential balance into its `ConfidentialBalance` representation. + public fun decompress_balance(balance: &CompressedConfidentialBalance): ConfidentialBalance { + ConfidentialBalance { + chunks: balance.chunks.map_ref(|ciphertext| twisted_elgamal::decompress_ciphertext(ciphertext)) + } + } + + /// Serializes a confidential balance into a byte array representation. + public fun balance_to_bytes(balance: &ConfidentialBalance): vector { + let bytes = vector[]; + + balance.chunks.for_each_ref(|ciphertext| { + bytes.append(twisted_elgamal::ciphertext_to_bytes(ciphertext)); + }); + + bytes + } + + /// Extracts the `C` value component (`a * H + r * G`) of each chunk in a confidential balance as a vector of `RistrettoPoint`s. + public fun balance_to_points_c(balance: &ConfidentialBalance): vector { + balance.chunks.map_ref(|chunk| { + let (c, _) = twisted_elgamal::ciphertext_as_points(chunk); + ristretto255::point_clone(c) + }) + } + + /// Extracts the `D` randomness component (`r * Y`) of each chunk in a confidential balance as a vector of `RistrettoPoint`s. 
+    public fun balance_to_points_d(balance: &ConfidentialBalance): vector<RistrettoPoint> {
+        balance.chunks.map_ref(|chunk| {
+            let (_, d) = twisted_elgamal::ciphertext_as_points(chunk);
+            ristretto255::point_clone(d)
+        })
+    }
+
+    /// Adds two confidential balances homomorphically, mutating the first balance in place.
+    /// The second balance must have fewer or equal chunks compared to the first.
+    public fun add_balances_mut(lhs: &mut ConfidentialBalance, rhs: &ConfidentialBalance) {
+        assert!(lhs.chunks.length() >= rhs.chunks.length(), error::internal(EINTERNAL_ERROR));
+
+        lhs.chunks.enumerate_mut(|i, chunk| {
+            if (i < rhs.chunks.length()) {
+                twisted_elgamal::ciphertext_add_assign(chunk, &rhs.chunks[i])
+            }
+        })
+    }
+
+    /// Subtracts one confidential balance from another homomorphically, mutating the first balance in place.
+    /// The second balance must have fewer or equal chunks compared to the first.
+    public fun sub_balances_mut(lhs: &mut ConfidentialBalance, rhs: &ConfidentialBalance) {
+        assert!(lhs.chunks.length() >= rhs.chunks.length(), error::internal(EINTERNAL_ERROR));
+
+        lhs.chunks.enumerate_mut(|i, chunk| {
+            if (i < rhs.chunks.length()) {
+                twisted_elgamal::ciphertext_sub_assign(chunk, &rhs.chunks[i])
+            }
+        })
+    }
+
+    /// Checks if two confidential balances are equivalent, including both value and randomness components.
+    public fun balance_equals(lhs: &ConfidentialBalance, rhs: &ConfidentialBalance): bool {
+        assert!(lhs.chunks.length() == rhs.chunks.length(), error::internal(EINTERNAL_ERROR));
+
+        let ok = true;
+
+        lhs.chunks.zip_ref(&rhs.chunks, |l, r| {
+            ok = ok && twisted_elgamal::ciphertext_equals(l, r);
+        });
+
+        ok
+    }
+
+    /// Checks if the corresponding value components (`C`) of two confidential balances are equivalent.
+    public fun balance_c_equals(lhs: &ConfidentialBalance, rhs: &ConfidentialBalance): bool {
+        assert!(lhs.chunks.length() == rhs.chunks.length(), error::internal(EINTERNAL_ERROR));
+
+        let ok = true;
+
+        lhs.chunks.zip_ref(&rhs.chunks, |l, r| {
+            let (lc, _) = twisted_elgamal::ciphertext_as_points(l);
+            let (rc, _) = twisted_elgamal::ciphertext_as_points(r);
+
+            ok = ok && ristretto255::point_equals(lc, rc);
+        });
+
+        ok
+    }
+
+    /// Checks if a confidential balance is equivalent to zero, where all chunks are the identity element.
+    public fun is_zero_balance(balance: &ConfidentialBalance): bool {
+        balance.chunks.all(|chunk| {
+            twisted_elgamal::ciphertext_equals(
+                chunk,
+                &twisted_elgamal::ciphertext_from_points(ristretto255::point_identity(), ristretto255::point_identity())
+            )
+        })
+    }
+
+    /// Splits a 64-bit integer amount into four 16-bit chunks, represented as `Scalar` values.
+    public fun split_into_chunks_u64(amount: u64): vector<Scalar> {
+        vector::range(0, PENDING_BALANCE_CHUNKS).map(|i| {
+            ristretto255::new_scalar_from_u64(amount >> (i * CHUNK_SIZE_BITS as u8) & 0xffff)
+        })
+    }
+
+    /// Splits a 128-bit integer amount into eight 16-bit chunks, represented as `Scalar` values.
+    public fun split_into_chunks_u128(amount: u128): vector<Scalar> {
+        vector::range(0, ACTUAL_BALANCE_CHUNKS).map(|i| {
+            ristretto255::new_scalar_from_u128(amount >> (i * CHUNK_SIZE_BITS as u8) & 0xffff)
+        })
+    }
+
+    //
+    // View functions
+    //
+
+    #[view]
+    /// Returns the number of chunks in a pending balance.
+    public fun get_pending_balance_chunks(): u64 {
+        PENDING_BALANCE_CHUNKS
+    }
+
+    #[view]
+    /// Returns the number of chunks in an actual balance.
+ public fun get_actual_balance_chunks(): u64 { + ACTUAL_BALANCE_CHUNKS + } + + #[view] + /// Returns the number of bits in a single chunk. + public fun get_chunk_size_bits(): u64 { + CHUNK_SIZE_BITS + } + + // + // Test-only + // + + #[test_only] + /// A helper struct for generating randomness for confidential balances in test environments. + /// Each `r` element represents a random scalar used for Twisted ElGamal encryption. + /// Can be used to generate both actual and pending balances. + struct ConfidentialBalanceRandomness has drop { + r: vector, + } + + #[test_only] + /// Generates a `ConfidentialBalanceRandomness` instance containing four random scalars. + /// This is useful for creating randomness for actual balances during testing. + public fun generate_balance_randomness(): ConfidentialBalanceRandomness { + ConfidentialBalanceRandomness { + r: vector::range(0, ACTUAL_BALANCE_CHUNKS).map(|_| ristretto255::random_scalar()) + } + } + + #[test_only] + /// Returns a reference to the vector of random scalars within the provided `ConfidentialBalanceRandomness`. + public fun balance_randomness_as_scalars(randomness: &ConfidentialBalanceRandomness): &vector { + &randomness.r + } + + #[test_only] + /// Creates a new actual balance from a 128-bit amount using the provided randomness and encryption key. + /// Splits the amount into eight 16-bit chunks and encrypts each chunk with the corresponding random scalar. + public fun new_actual_balance_from_u128( + amount: u128, + randomness: &ConfidentialBalanceRandomness, + ek: &twisted_elgamal::CompressedPubkey): ConfidentialBalance + { + let amount_chunks = split_into_chunks_u128(amount); + + ConfidentialBalance { + chunks: vector::range(0, ACTUAL_BALANCE_CHUNKS).map(|i| { + twisted_elgamal::new_ciphertext_with_basepoint(&amount_chunks[i], &randomness.r[i], ek) + }) + } + } + + #[test_only] + /// Creates a new pending balance from a 64-bit amount using the provided randomness and encryption key. + /// Splits the amount into four 16-bit chunks and encrypts each chunk with the corresponding random scalar. + public fun new_pending_balance_from_u64( + amount: u64, + randomness: &ConfidentialBalanceRandomness, + ek: &twisted_elgamal::CompressedPubkey): ConfidentialBalance + { + let amount_chunks = split_into_chunks_u64(amount); + + ConfidentialBalance { + chunks: vector::range(0, PENDING_BALANCE_CHUNKS).map(|i| { + twisted_elgamal::new_ciphertext_with_basepoint(&amount_chunks[i], &randomness.r[i], ek) + }) + } + } + + #[test_only] + /// Verifies that an actual balance encrypts the specified 128-bit amount using the provided decryption key. + /// Checks that the decryption of each chunk matches the corresponding 16-bit chunk of the provided amount. + /// Use carefully, as it may return `false` if the balance is not normalized (i.e. has overflowed chunks). 
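+
+    // Sketch of the decryption identity relied on below, assuming the Twisted ElGamal
+    // convention used throughout this module (encryption key P = dk^(-1) * H): each chunk
+    // encrypts a 16-bit value `a` as
+    //     C = a * G + r * H,    D = r * P,
+    // so subtracting `dk * D` removes the randomness:
+    //     C - dk * D = a * G + r * H - r * H = a * G.
+    // The verifiers below therefore compare `C - dk * D` against `basepoint_mul(a)` for each
+    // 16-bit chunk of the expected amount.
+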
+ public fun verify_actual_balance(balance: &ConfidentialBalance, dk: &Scalar, amount: u128): bool { + assert!(balance.chunks.length() == ACTUAL_BALANCE_CHUNKS, error::internal(EINTERNAL_ERROR)); + + let amount_chunks = split_into_chunks_u128(amount); + let ok = true; + + balance.chunks.zip_ref(&amount_chunks, |balance, amount| { + let (balance_c, balance_d) = twisted_elgamal::ciphertext_as_points(balance); + let point_amount = ristretto255::point_sub(balance_c, &ristretto255::point_mul(balance_d, dk)); + + ok = ok && ristretto255::point_equals(&point_amount, &ristretto255::basepoint_mul(amount)); + }); + + ok + } + + #[test_only] + /// Verifies that a pending balance encrypts the specified 64-bit amount using the provided decryption key. + /// Checks that the decryption of each chunk matches the corresponding 16-bit chunk of the provided amount. + /// Use carefully, as it may return `false` if the balance is not normalized (i.e. has overflowed chunks). + public fun verify_pending_balance(balance: &ConfidentialBalance, dk: &Scalar, amount: u64): bool { + assert!(balance.chunks.length() == PENDING_BALANCE_CHUNKS, error::internal(EINTERNAL_ERROR)); + + let amount_chunks = split_into_chunks_u64(amount); + let ok = true; + + balance.chunks.zip_ref(&amount_chunks, |balance, amount| { + let (balance_c, balance_d) = twisted_elgamal::ciphertext_as_points(balance); + let point_amount = ristretto255::point_sub(balance_c, &ristretto255::point_mul(balance_d, dk)); + + ok = ok && ristretto255::point_equals(&point_amount, &ristretto255::basepoint_mul(amount)); + }); + + ok + } +} diff --git a/aptos-move/framework/aptos-experimental/sources/confidential_asset/confidential_balance.spec.move b/aptos-move/framework/aptos-experimental/sources/confidential_asset/confidential_balance.spec.move new file mode 100644 index 0000000000000..fc1eeb6d6cb5f --- /dev/null +++ b/aptos-move/framework/aptos-experimental/sources/confidential_asset/confidential_balance.spec.move @@ -0,0 +1,2 @@ +spec aptos_experimental::confidential_balance { +} diff --git a/aptos-move/framework/aptos-experimental/sources/confidential_asset/confidential_proof.move b/aptos-move/framework/aptos-experimental/sources/confidential_asset/confidential_proof.move new file mode 100644 index 0000000000000..866e7323132d5 --- /dev/null +++ b/aptos-move/framework/aptos-experimental/sources/confidential_asset/confidential_proof.move @@ -0,0 +1,2095 @@ +/// The `confidential_proof` module provides the infrastructure for verifying zero-knowledge proofs used in the Confidential Asset protocol. +/// These proofs ensure correctness for operations such as `confidential_transfer`, `withdraw`, `rotate_encryption_key`, and `normalize`. 
+module aptos_experimental::confidential_proof { + use std::error; + use std::option; + use std::option::Option; + use std::vector; + use aptos_std::ristretto255::{Self, CompressedRistretto, Scalar}; + use aptos_std::ristretto255_bulletproofs::{Self as bulletproofs, RangeProof}; + + use aptos_experimental::confidential_balance; + use aptos_experimental::ristretto255_twisted_elgamal as twisted_elgamal; + + friend aptos_experimental::confidential_asset; + + // + // Errors + // + + const ESIGMA_PROTOCOL_VERIFY_FAILED: u64 = 1; + const ERANGE_PROOF_VERIFICATION_FAILED: u64 = 2; + + // + // Constants + // + + const FIAT_SHAMIR_WITHDRAWAL_SIGMA_DST: vector = b"AptosConfidentialAsset/WithdrawalProofFiatShamir"; + const FIAT_SHAMIR_TRANSFER_SIGMA_DST: vector = b"AptosConfidentialAsset/TransferProofFiatShamir"; + const FIAT_SHAMIR_ROTATION_SIGMA_DST: vector = b"AptosConfidentialAsset/RotationProofFiatShamir"; + const FIAT_SHAMIR_NORMALIZATION_SIGMA_DST: vector = b"AptosConfidentialAsset/NormalizationProofFiatShamir"; + + const BULLETPROOFS_DST: vector = b"AptosConfidentialAsset/BulletproofRangeProof"; + const BULLETPROOFS_NUM_BITS: u64 = 16; + + // + // Structs + // + + /// Represents the proof structure for validating a withdrawal operation. + struct WithdrawalProof has drop { + /// Sigma proof ensuring that the withdrawal operation maintains balance integrity. + sigma_proof: WithdrawalSigmaProof, + /// Range proof ensuring that the resulting balance chunks are normalized (i.e., within the 16-bit limit). + zkrp_new_balance: RangeProof, + } + + /// Represents the proof structure for validating a transfer operation. + struct TransferProof has drop { + /// Sigma proof ensuring that the transfer operation maintains balance integrity and correctness. + sigma_proof: TransferSigmaProof, + /// Range proof ensuring that the resulting balance chunks for the sender are normalized (i.e., within the 16-bit limit). + zkrp_new_balance: RangeProof, + /// Range proof ensuring that the transferred amount chunks are normalized (i.e., within the 16-bit limit). + zkrp_transfer_amount: RangeProof, + } + + /// Represents the proof structure for validating a normalization operation. + struct NormalizationProof has drop { + /// Sigma proof ensuring that the normalization operation maintains balance integrity. + sigma_proof: NormalizationSigmaProof, + /// Range proof ensuring that the resulting balance chunks are normalized (i.e., within the 16-bit limit). + zkrp_new_balance: RangeProof, + } + + /// Represents the proof structure for validating a key rotation operation. + struct RotationProof has drop { + /// Sigma proof ensuring that the key rotation operation preserves balance integrity. + sigma_proof: RotationSigmaProof, + /// Range proof ensuring that the resulting balance chunks after key rotation are normalized (i.e., within the 16-bit limit). 
+ zkrp_new_balance: RangeProof, + } + + // + // Helper structs + // + + struct WithdrawalSigmaProofXs has drop { + x1: CompressedRistretto, + x2: CompressedRistretto, + x3s: vector, + x4s: vector, + } + + struct WithdrawalSigmaProofAlphas has drop { + a1s: vector, + a2: Scalar, + a3: Scalar, + a4s: vector, + } + + struct WithdrawalSigmaProofGammas has drop { + g1: Scalar, + g2: Scalar, + g3s: vector, + g4s: vector, + } + + struct WithdrawalSigmaProof has drop { + alphas: WithdrawalSigmaProofAlphas, + xs: WithdrawalSigmaProofXs, + } + + struct TransferSigmaProofXs has drop { + x1: CompressedRistretto, + x2s: vector, + x3s: vector, + x4s: vector, + x5: CompressedRistretto, + x6s: vector, + x7s: vector>, + x8s: vector, + } + + struct TransferSigmaProofAlphas has drop { + a1s: vector, + a2: Scalar, + a3s: vector, + a4s: vector, + a5: Scalar, + a6s: vector, + } + + struct TransferSigmaProofGammas has drop { + g1: Scalar, + g2s: vector, + g3s: vector, + g4s: vector, + g5: Scalar, + g6s: vector, + g7s: vector>, + g8s: vector, + } + + struct TransferSigmaProof has drop { + alphas: TransferSigmaProofAlphas, + xs: TransferSigmaProofXs, + } + + struct NormalizationSigmaProofXs has drop { + x1: CompressedRistretto, + x2: CompressedRistretto, + x3s: vector, + x4s: vector, + } + + struct NormalizationSigmaProofAlphas has drop { + a1s: vector, + a2: Scalar, + a3: Scalar, + a4s: vector, + } + + struct NormalizationSigmaProofGammas has drop { + g1: Scalar, + g2: Scalar, + g3s: vector, + g4s: vector, + } + + struct NormalizationSigmaProof has drop { + alphas: NormalizationSigmaProofAlphas, + xs: NormalizationSigmaProofXs, + } + + struct RotationSigmaProofXs has drop { + x1: CompressedRistretto, + x2: CompressedRistretto, + x3: CompressedRistretto, + x4s: vector, + x5s: vector, + } + + struct RotationSigmaProofAlphas has drop { + a1s: vector, + a2: Scalar, + a3: Scalar, + a4: Scalar, + a5s: vector, + } + + struct RotationSigmaProofGammas has drop { + g1: Scalar, + g2: Scalar, + g3: Scalar, + g4s: vector, + g5s: vector, + } + + struct RotationSigmaProof has drop { + alphas: RotationSigmaProofAlphas, + xs: RotationSigmaProofXs, + } + + // + // Proof verification functions + // + + /// Verifies the validity of the `withdraw` operation. + /// + /// This function ensures that the provided proof (`WithdrawalProof`) meets the following conditions: + /// 1. The current balance (`current_balance`) and new balance (`new_balance`) encrypt the corresponding values + /// under the same encryption key (`ek`) before and after the withdrawal of the specified amount (`amount`), respectively. + /// 2. The relationship `new_balance = current_balance - amount` holds, verifying that the withdrawal amount is deducted correctly. + /// 3. The new balance (`new_balance`) is normalized, with each chunk adhering to the range [0, 2^16). + /// + /// If all conditions are satisfied, the proof validates the withdrawal; otherwise, the function causes an error. + public fun verify_withdrawal_proof( + ek: &twisted_elgamal::CompressedPubkey, + amount: u64, + current_balance: &confidential_balance::ConfidentialBalance, + new_balance: &confidential_balance::ConfidentialBalance, + proof: &WithdrawalProof) + { + verify_withdrawal_sigma_proof(ek, amount, current_balance, new_balance, &proof.sigma_proof); + verify_new_balance_range_proof(new_balance, &proof.zkrp_new_balance); + } + + /// Verifies the validity of the `confidential_transfer` operation. 
+ /// + /// This function ensures that the provided proof (`TransferProof`) meets the following conditions: + /// 1. The transferred amount (`recipient_amount` and `sender_amount`) and the auditors' amounts + /// (`auditor_amounts`), if provided, encrypt the transfer value using the recipient's, sender's, + /// and auditors' encryption keys, repectively. + /// 2. The sender's current balance (`current_balance`) and new balance (`new_balance`) encrypt the corresponding values + /// under the sender's encryption key (`sender_ek`) before and after the transfer, respectively. + /// 3. The relationship `new_balance = current_balance - transfer_amount` is maintained, ensuring balance integrity. + /// 4. The transferred value (`recipient_amount`) is properly normalized, with each chunk adhering to the range [0, 2^16). + /// 5. The sender's new balance is normalized, with each chunk in `new_balance` also adhering to the range [0, 2^16). + /// + /// If all conditions are satisfied, the proof validates the transfer; otherwise, the function causes an error. + public fun verify_transfer_proof( + sender_ek: &twisted_elgamal::CompressedPubkey, + recipient_ek: &twisted_elgamal::CompressedPubkey, + current_balance: &confidential_balance::ConfidentialBalance, + new_balance: &confidential_balance::ConfidentialBalance, + sender_amount: &confidential_balance::ConfidentialBalance, + recipient_amount: &confidential_balance::ConfidentialBalance, + auditor_eks: &vector, + auditor_amounts: &vector, + proof: &TransferProof) + { + verify_transfer_sigma_proof( + sender_ek, + recipient_ek, + current_balance, + new_balance, + sender_amount, + recipient_amount, + auditor_eks, + auditor_amounts, + &proof.sigma_proof + ); + verify_new_balance_range_proof(new_balance, &proof.zkrp_new_balance); + verify_transfer_amount_range_proof(recipient_amount, &proof.zkrp_transfer_amount); + } + + /// Verifies the validity of the `normalize` operation. + /// + /// This function ensures that the provided proof (`NormalizationProof`) meets the following conditions: + /// 1. The current balance (`current_balance`) and new balance (`new_balance`) encrypt the same value + /// under the same provided encryption key (`ek`), verifying that the normalization process preserves the balance value. + /// 2. The new balance (`new_balance`) is properly normalized, with each chunk adhering to the range [0, 2^16), + /// as verified through the range proof in the normalization process. + /// + /// If all conditions are satisfied, the proof validates the normalization; otherwise, the function causes an error. + public fun verify_normalization_proof( + ek: &twisted_elgamal::CompressedPubkey, + current_balance: &confidential_balance::ConfidentialBalance, + new_balance: &confidential_balance::ConfidentialBalance, + proof: &NormalizationProof) + { + verify_normalization_sigma_proof(ek, current_balance, new_balance, &proof.sigma_proof); + verify_new_balance_range_proof(new_balance, &proof.zkrp_new_balance); + } + + /// Verifies the validity of the `rotate_encryption_key` operation. + /// + /// This function ensures that the provided proof (`RotationProof`) meets the following conditions: + /// 1. The current balance (`current_balance`) and new balance (`new_balance`) encrypt the same value under the + /// current encryption key (`current_ek`) and the new encryption key (`new_ek`), respectively, verifying + /// that the key rotation preserves the balance value. + /// 2. 
The new balance (`new_balance`) is properly normalized, with each chunk adhering to the range [0, 2^16), + /// ensuring balance integrity after the key rotation. + /// + /// If all conditions are satisfied, the proof validates the key rotation; otherwise, the function causes an error. + public fun verify_rotation_proof( + current_ek: &twisted_elgamal::CompressedPubkey, + new_ek: &twisted_elgamal::CompressedPubkey, + current_balance: &confidential_balance::ConfidentialBalance, + new_balance: &confidential_balance::ConfidentialBalance, + proof: &RotationProof) + { + verify_rotation_sigma_proof(current_ek, new_ek, current_balance, new_balance, &proof.sigma_proof); + verify_new_balance_range_proof(new_balance, &proof.zkrp_new_balance); + } + + // + // Verification functions implementations + // + + /// Verifies the validity of the `WithdrawalSigmaProof`. + fun verify_withdrawal_sigma_proof( + ek: &twisted_elgamal::CompressedPubkey, + amount: u64, + current_balance: &confidential_balance::ConfidentialBalance, + new_balance: &confidential_balance::ConfidentialBalance, + proof: &WithdrawalSigmaProof) + { + let amount_chunks = confidential_balance::split_into_chunks_u64(amount); + let amount = ristretto255::new_scalar_from_u64(amount); + + let rho = fiat_shamir_withdrawal_sigma_proof_challenge(ek, &amount_chunks, current_balance, &proof.xs); + + let gammas = msm_withdrawal_gammas(&rho); + + let scalars_lhs = vector[gammas.g1, gammas.g2]; + scalars_lhs.append(gammas.g3s); + scalars_lhs.append(gammas.g4s); + + let points_lhs = vector[ + ristretto255::point_decompress(&proof.xs.x1), + ristretto255::point_decompress(&proof.xs.x2) + ]; + points_lhs.append(proof.xs.x3s.map_ref(|x| ristretto255::point_decompress(x))); + points_lhs.append(proof.xs.x4s.map_ref(|x| ristretto255::point_decompress(x))); + + let scalar_g = scalar_linear_combination( + &proof.alphas.a1s, + &vector::range(0, 8).map(|i| new_scalar_from_pow2(i * 16)) + ); + ristretto255::scalar_mul_assign(&mut scalar_g, &gammas.g1); + ristretto255::scalar_add_assign( + &mut scalar_g, + &scalar_linear_combination(&gammas.g3s, &proof.alphas.a1s) + ); + ristretto255::scalar_sub_assign(&mut scalar_g, &scalar_mul_3(&gammas.g1, &rho, &amount)); + + let scalar_h = ristretto255::scalar_mul(&gammas.g2, &proof.alphas.a3); + ristretto255::scalar_add_assign( + &mut scalar_h, + &scalar_linear_combination(&gammas.g3s, &proof.alphas.a4s) + ); + + let scalar_ek = ristretto255::scalar_mul(&gammas.g2, &rho); + ristretto255::scalar_add_assign( + &mut scalar_ek, + &scalar_linear_combination(&gammas.g4s, &proof.alphas.a4s) + ); + + let scalars_current_balance_d = vector::range(0, 8).map(|i| { + scalar_mul_3(&gammas.g1, &proof.alphas.a2, &new_scalar_from_pow2(i * 16)) + }); + + let scalars_new_balance_d = vector::range(0, 8).map(|i| { + ristretto255::scalar_mul(&gammas.g4s[i], &rho) + }); + + let scalars_current_balance_c = vector::range(0, 8).map(|i| { + scalar_mul_3(&gammas.g1, &rho, &new_scalar_from_pow2(i * 16)) + }); + + let scalars_new_balance_c = vector::range(0, 8).map(|i| { + ristretto255::scalar_mul(&gammas.g3s[i], &rho) + }); + + let scalars_rhs = vector[scalar_g, scalar_h, scalar_ek]; + scalars_rhs.append(scalars_current_balance_d); + scalars_rhs.append(scalars_new_balance_d); + scalars_rhs.append(scalars_current_balance_c); + scalars_rhs.append(scalars_new_balance_c); + + let points_rhs = vector[ + ristretto255::basepoint(), + ristretto255::hash_to_point_base(), + twisted_elgamal::pubkey_to_point(ek) + ]; + 
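+        // The two MSM inputs are position-aligned: `scalar_g` weights G, `scalar_h` weights H,
+        // `scalar_ek` weights the encryption key point, and the remaining scalars weight the
+        // D and C components of the current and new balances appended below, in that order.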
points_rhs.append(confidential_balance::balance_to_points_d(current_balance)); + points_rhs.append(confidential_balance::balance_to_points_d(new_balance)); + points_rhs.append(confidential_balance::balance_to_points_c(current_balance)); + points_rhs.append(confidential_balance::balance_to_points_c(new_balance)); + + let lhs = ristretto255::multi_scalar_mul(&points_lhs, &scalars_lhs); + let rhs = ristretto255::multi_scalar_mul(&points_rhs, &scalars_rhs); + + assert!( + ristretto255::point_equals(&lhs, &rhs), + error::invalid_argument(ESIGMA_PROTOCOL_VERIFY_FAILED) + ); + } + + /// Verifies the validity of the `TransferSigmaProof`. + fun verify_transfer_sigma_proof( + sender_ek: &twisted_elgamal::CompressedPubkey, + recipient_ek: &twisted_elgamal::CompressedPubkey, + current_balance: &confidential_balance::ConfidentialBalance, + new_balance: &confidential_balance::ConfidentialBalance, + sender_amount: &confidential_balance::ConfidentialBalance, + recipient_amount: &confidential_balance::ConfidentialBalance, + auditor_eks: &vector, + auditor_amounts: &vector, + proof: &TransferSigmaProof) + { + let rho = fiat_shamir_transfer_sigma_proof_challenge( + sender_ek, + recipient_ek, + current_balance, + new_balance, + sender_amount, + recipient_amount, + auditor_eks, + auditor_amounts, + &proof.xs + ); + + let gammas = msm_transfer_gammas(&rho, proof.xs.x7s.length()); + + let scalars_lhs = vector[gammas.g1]; + scalars_lhs.append(gammas.g2s); + scalars_lhs.append(gammas.g3s); + scalars_lhs.append(gammas.g4s); + scalars_lhs.push_back(gammas.g5); + scalars_lhs.append(gammas.g6s); + gammas.g7s.for_each(|gamma| scalars_lhs.append(gamma)); + scalars_lhs.append(gammas.g8s); + + let points_lhs = vector[ + ristretto255::point_decompress(&proof.xs.x1), + ]; + points_lhs.append(proof.xs.x2s.map_ref(|x| ristretto255::point_decompress(x))); + points_lhs.append(proof.xs.x3s.map_ref(|x| ristretto255::point_decompress(x))); + points_lhs.append(proof.xs.x4s.map_ref(|x| ristretto255::point_decompress(x))); + points_lhs.push_back(ristretto255::point_decompress(&proof.xs.x5)); + points_lhs.append(proof.xs.x6s.map_ref(|x| ristretto255::point_decompress(x))); + proof.xs.x7s.for_each_ref(|xs| { + points_lhs.append(xs.map_ref(|x| ristretto255::point_decompress(x))); + }); + points_lhs.append(proof.xs.x8s.map_ref(|x| ristretto255::point_decompress(x))); + + let scalar_g = scalar_linear_combination( + &proof.alphas.a1s, + &vector::range(0, 8).map(|i| new_scalar_from_pow2(i * 16)) + ); + ristretto255::scalar_mul_assign(&mut scalar_g, &gammas.g1); + vector::range(0, 4).for_each(|i| { + ristretto255::scalar_add_assign( + &mut scalar_g, + &ristretto255::scalar_mul(&gammas.g4s[i], &proof.alphas.a4s[i]) + ); + }); + ristretto255::scalar_add_assign( + &mut scalar_g, + &scalar_linear_combination(&gammas.g6s, &proof.alphas.a1s) + ); + + let scalar_h = ristretto255::scalar_mul(&gammas.g5, &proof.alphas.a5); + vector::range(0, 8).for_each(|i| { + ristretto255::scalar_add_assign( + &mut scalar_h, + &scalar_mul_3(&gammas.g1, &proof.alphas.a6s[i], &new_scalar_from_pow2(i * 16)) + ); + }); + vector::range(0, 4).for_each(|i| { + ristretto255::scalar_sub_assign( + &mut scalar_h, + &scalar_mul_3(&gammas.g1, &proof.alphas.a3s[i], &new_scalar_from_pow2(i * 16)) + ); + }); + ristretto255::scalar_add_assign( + &mut scalar_h, + &scalar_linear_combination(&gammas.g4s, &proof.alphas.a3s) + ); + ristretto255::scalar_add_assign( + &mut scalar_h, + &scalar_linear_combination(&gammas.g6s, &proof.alphas.a6s) + ); + + let scalar_sender_ek = 
scalar_linear_combination(&gammas.g2s, &proof.alphas.a6s); + ristretto255::scalar_add_assign(&mut scalar_sender_ek, &ristretto255::scalar_mul(&gammas.g5, &rho)); + ristretto255::scalar_add_assign( + &mut scalar_sender_ek, + &scalar_linear_combination(&gammas.g8s, &proof.alphas.a3s) + ); + + let scalar_recipient_ek = ristretto255::scalar_zero(); + vector::range(0, 4).for_each(|i| { + ristretto255::scalar_add_assign( + &mut scalar_recipient_ek, + &ristretto255::scalar_mul(&gammas.g3s[i], &proof.alphas.a3s[i]) + ); + }); + + let scalar_ek_auditors = gammas.g7s.map_ref(|gamma: &vector| { + let scalar_auditor_ek = ristretto255::scalar_zero(); + vector::range(0, 4).for_each(|i| { + ristretto255::scalar_add_assign( + &mut scalar_auditor_ek, + &ristretto255::scalar_mul(&gamma[i], &proof.alphas.a3s[i]) + ); + }); + scalar_auditor_ek + }); + + let scalars_new_balance_d = vector::range(0, 8).map(|i| { + let scalar = ristretto255::scalar_mul(&gammas.g2s[i], &rho); + ristretto255::scalar_sub_assign( + &mut scalar, + &scalar_mul_3(&gammas.g1, &proof.alphas.a2, &new_scalar_from_pow2(i * 16)) + ); + scalar + }); + + let scalars_recipient_amount_d = vector::range(0, 4).map(|i| { + ristretto255::scalar_mul(&gammas.g3s[i], &rho) + }); + + let scalars_current_balance_d = vector::range(0, 8).map(|i| { + scalar_mul_3(&gammas.g1, &proof.alphas.a2, &new_scalar_from_pow2(i * 16)) + }); + + let scalars_auditor_amount_d = gammas.g7s.map_ref(|gamma| { + gamma.map_ref(|gamma| ristretto255::scalar_mul(gamma, &rho)) + }); + + let scalars_sender_amount_d = vector::range(0, 4).map(|i| { + ristretto255::scalar_mul(&gammas.g8s[i], &rho) + }); + + let scalars_current_balance_c = vector::range(0, 8).map(|i| { + scalar_mul_3(&gammas.g1, &rho, &new_scalar_from_pow2(i * 16)) + }); + + let scalars_transfer_amount_c = vector::range(0, 4).map(|i| { + let scalar = ristretto255::scalar_mul(&gammas.g4s[i], &rho); + ristretto255::scalar_sub_assign( + &mut scalar, + &scalar_mul_3(&gammas.g1, &rho, &new_scalar_from_pow2(i * 16)) + ); + scalar + }); + + let scalars_new_balance_c = vector::range(0, 8).map(|i| { + ristretto255::scalar_mul(&gammas.g6s[i], &rho) + }); + + let scalars_rhs = vector[scalar_g, scalar_h, scalar_sender_ek, scalar_recipient_ek]; + scalars_rhs.append(scalar_ek_auditors); + scalars_rhs.append(scalars_new_balance_d); + scalars_rhs.append(scalars_recipient_amount_d); + scalars_rhs.append(scalars_current_balance_d); + scalars_auditor_amount_d.for_each(|scalars| scalars_rhs.append(scalars)); + scalars_rhs.append(scalars_sender_amount_d); + scalars_rhs.append(scalars_current_balance_c); + scalars_rhs.append(scalars_transfer_amount_c); + scalars_rhs.append(scalars_new_balance_c); + + let points_rhs = vector[ + ristretto255::basepoint(), + ristretto255::hash_to_point_base(), + twisted_elgamal::pubkey_to_point(sender_ek), + twisted_elgamal::pubkey_to_point(recipient_ek) + ]; + points_rhs.append(auditor_eks.map_ref(|ek| twisted_elgamal::pubkey_to_point(ek))); + points_rhs.append(confidential_balance::balance_to_points_d(new_balance)); + points_rhs.append(confidential_balance::balance_to_points_d(recipient_amount)); + points_rhs.append(confidential_balance::balance_to_points_d(current_balance)); + auditor_amounts.for_each_ref(|balance| { + points_rhs.append(confidential_balance::balance_to_points_d(balance)); + }); + points_rhs.append(confidential_balance::balance_to_points_d(sender_amount)); + points_rhs.append(confidential_balance::balance_to_points_c(current_balance)); + 
points_rhs.append(confidential_balance::balance_to_points_c(recipient_amount)); + points_rhs.append(confidential_balance::balance_to_points_c(new_balance)); + + let lhs = ristretto255::multi_scalar_mul(&points_lhs, &scalars_lhs); + let rhs = ristretto255::multi_scalar_mul(&points_rhs, &scalars_rhs); + + assert!( + ristretto255::point_equals(&lhs, &rhs), + error::invalid_argument(ESIGMA_PROTOCOL_VERIFY_FAILED) + ); + } + + /// Verifies the validity of the `NormalizationSigmaProof`. + fun verify_normalization_sigma_proof( + ek: &twisted_elgamal::CompressedPubkey, + current_balance: &confidential_balance::ConfidentialBalance, + new_balance: &confidential_balance::ConfidentialBalance, + proof: &NormalizationSigmaProof) + { + let rho = fiat_shamir_normalization_sigma_proof_challenge(ek, current_balance, new_balance, &proof.xs); + let gammas = msm_normalization_gammas(&rho); + + let scalars_lhs = vector[gammas.g1, gammas.g2]; + scalars_lhs.append(gammas.g3s); + scalars_lhs.append(gammas.g4s); + + let points_lhs = vector[ + ristretto255::point_decompress(&proof.xs.x1), + ristretto255::point_decompress(&proof.xs.x2) + ]; + points_lhs.append(proof.xs.x3s.map_ref(|x| ristretto255::point_decompress(x))); + points_lhs.append(proof.xs.x4s.map_ref(|x| ristretto255::point_decompress(x))); + + let scalar_g = scalar_linear_combination( + &proof.alphas.a1s, + &vector::range(0, 8).map(|i| new_scalar_from_pow2(i * 16)) + ); + ristretto255::scalar_mul_assign(&mut scalar_g, &gammas.g1); + ristretto255::scalar_add_assign( + &mut scalar_g, + &scalar_linear_combination(&gammas.g3s, &proof.alphas.a1s) + ); + + let scalar_h = ristretto255::scalar_mul(&gammas.g2, &proof.alphas.a3); + ristretto255::scalar_add_assign( + &mut scalar_h, + &scalar_linear_combination(&gammas.g3s, &proof.alphas.a4s) + ); + + let scalar_ek = ristretto255::scalar_mul(&gammas.g2, &rho); + ristretto255::scalar_add_assign( + &mut scalar_ek, + &scalar_linear_combination(&gammas.g4s, &proof.alphas.a4s) + ); + + let scalars_current_balance_d = vector::range(0, 8).map(|i| { + scalar_mul_3(&gammas.g1, &proof.alphas.a2, &new_scalar_from_pow2(i * 16)) + }); + + let scalars_new_balance_d = vector::range(0, 8).map(|i| { + ristretto255::scalar_mul(&gammas.g4s[i], &rho) + }); + + let scalars_current_balance_c = vector::range(0, 8).map(|i| { + scalar_mul_3(&gammas.g1, &rho, &new_scalar_from_pow2(i * 16)) + }); + + let scalars_new_balance_c = vector::range(0, 8).map(|i| { + ristretto255::scalar_mul(&gammas.g3s[i], &rho) + }); + + let scalars_rhs = vector[scalar_g, scalar_h, scalar_ek]; + scalars_rhs.append(scalars_current_balance_d); + scalars_rhs.append(scalars_new_balance_d); + scalars_rhs.append(scalars_current_balance_c); + scalars_rhs.append(scalars_new_balance_c); + + let points_rhs = vector[ + ristretto255::basepoint(), + ristretto255::hash_to_point_base(), + twisted_elgamal::pubkey_to_point(ek) + ]; + points_rhs.append(confidential_balance::balance_to_points_d(current_balance)); + points_rhs.append(confidential_balance::balance_to_points_d(new_balance)); + points_rhs.append(confidential_balance::balance_to_points_c(current_balance)); + points_rhs.append(confidential_balance::balance_to_points_c(new_balance)); + + let lhs = ristretto255::multi_scalar_mul(&points_lhs, &scalars_lhs); + let rhs = ristretto255::multi_scalar_mul(&points_rhs, &scalars_rhs); + + assert!( + ristretto255::point_equals(&lhs, &rhs), + error::invalid_argument(ESIGMA_PROTOCOL_VERIFY_FAILED) + ); + } + + /// Verifies the validity of the `RotationSigmaProof`. 
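+
+    // Shared structure of the sigma-proof verifiers in this module (sketch): each proof
+    // consists of commitments X_k and responses alpha_j satisfying linear relations of the
+    // form X_k = sum_j alpha_j * B_j + rho * P_k over public points. Instead of checking
+    // every relation separately, the verifier derives one pseudo-random weight gamma_k per
+    // relation from rho (see the `msm_*_gammas` helpers below) and checks the single
+    // aggregated equation
+    //     sum_k gamma_k * X_k == sum_k gamma_k * (sum_j alpha_j * B_j + rho * P_k)
+    // with one `ristretto255::multi_scalar_mul` per side, which is what the
+    // `points_lhs`/`scalars_lhs` and `points_rhs`/`scalars_rhs` vectors assemble.
+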
+ fun verify_rotation_sigma_proof( + current_ek: &twisted_elgamal::CompressedPubkey, + new_ek: &twisted_elgamal::CompressedPubkey, + current_balance: &confidential_balance::ConfidentialBalance, + new_balance: &confidential_balance::ConfidentialBalance, + proof: &RotationSigmaProof) + { + let rho = fiat_shamir_rotation_sigma_proof_challenge( + current_ek, + new_ek, + current_balance, + new_balance, + &proof.xs + ); + let gammas = msm_rotation_gammas(&rho); + + let scalars_lhs = vector[gammas.g1, gammas.g2, gammas.g3]; + scalars_lhs.append(gammas.g4s); + scalars_lhs.append(gammas.g5s); + + let points_lhs = vector[ + ristretto255::point_decompress(&proof.xs.x1), + ristretto255::point_decompress(&proof.xs.x2), + ristretto255::point_decompress(&proof.xs.x3) + ]; + points_lhs.append(proof.xs.x4s.map_ref(|x| ristretto255::point_decompress(x))); + points_lhs.append(proof.xs.x5s.map_ref(|x| ristretto255::point_decompress(x))); + + let scalar_g = scalar_linear_combination( + &proof.alphas.a1s, + &vector::range(0, 8).map(|i| new_scalar_from_pow2(i * 16)) + ); + ristretto255::scalar_mul_assign(&mut scalar_g, &gammas.g1); + ristretto255::scalar_add_assign( + &mut scalar_g, + &scalar_linear_combination(&gammas.g4s, &proof.alphas.a1s) + ); + + let scalar_h = ristretto255::scalar_mul(&gammas.g2, &proof.alphas.a3); + ristretto255::scalar_add_assign(&mut scalar_h, &ristretto255::scalar_mul(&gammas.g3, &proof.alphas.a4)); + ristretto255::scalar_add_assign( + &mut scalar_h, + &scalar_linear_combination(&gammas.g4s, &proof.alphas.a5s) + ); + + let scalar_ek_cur = ristretto255::scalar_mul(&gammas.g2, &rho); + + let scalar_ek_new = ristretto255::scalar_mul(&gammas.g3, &rho); + ristretto255::scalar_add_assign( + &mut scalar_ek_new, + &scalar_linear_combination(&gammas.g5s, &proof.alphas.a5s) + ); + + let scalars_current_balance_d = vector::range(0, 8).map(|i| { + scalar_mul_3(&gammas.g1, &proof.alphas.a2, &new_scalar_from_pow2(i * 16)) + }); + + let scalars_new_balance_d = vector::range(0, 8).map(|i| { + ristretto255::scalar_mul(&gammas.g5s[i], &rho) + }); + + let scalars_current_balance_c = vector::range(0, 8).map(|i| { + scalar_mul_3(&gammas.g1, &rho, &new_scalar_from_pow2(i * 16)) + }); + + let scalars_new_balance_c = vector::range(0, 8).map(|i| { + ristretto255::scalar_mul(&gammas.g4s[i], &rho) + }); + + let scalars_rhs = vector[scalar_g, scalar_h, scalar_ek_cur, scalar_ek_new]; + scalars_rhs.append(scalars_current_balance_d); + scalars_rhs.append(scalars_new_balance_d); + scalars_rhs.append(scalars_current_balance_c); + scalars_rhs.append(scalars_new_balance_c); + + let points_rhs = vector[ + ristretto255::basepoint(), + ristretto255::hash_to_point_base(), + twisted_elgamal::pubkey_to_point(current_ek), + twisted_elgamal::pubkey_to_point(new_ek) + ]; + points_rhs.append(confidential_balance::balance_to_points_d(current_balance)); + points_rhs.append(confidential_balance::balance_to_points_d(new_balance)); + points_rhs.append(confidential_balance::balance_to_points_c(current_balance)); + points_rhs.append(confidential_balance::balance_to_points_c(new_balance)); + + let lhs = ristretto255::multi_scalar_mul(&points_lhs, &scalars_lhs); + let rhs = ristretto255::multi_scalar_mul(&points_rhs, &scalars_rhs); + + assert!( + ristretto255::point_equals(&lhs, &rhs), + error::invalid_argument(ESIGMA_PROTOCOL_VERIFY_FAILED) + ); + } + + /// Verifies the validity of the `NewBalanceRangeProof`. 
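+
+    // Note (sketch): each chunk's `C` component, C = a * G + r * H, doubles as a Pedersen
+    // commitment to the chunk value `a` with value base G and randomness base H. The two
+    // functions below therefore pass the `C` points of the balance, together with
+    // `ristretto255::basepoint()` and `ristretto255::hash_to_point_base()`, to a Bulletproofs
+    // batch verification with `BULLETPROOFS_NUM_BITS = 16`, proving that every chunk lies in
+    // [0, 2^16) without revealing it.
+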
+ fun verify_new_balance_range_proof( + new_balance: &confidential_balance::ConfidentialBalance, + zkrp_new_balance: &RangeProof) + { + let balance_c = confidential_balance::balance_to_points_c(new_balance); + + assert!( + bulletproofs::verify_batch_range_proof( + &balance_c, + &ristretto255::basepoint(), + &ristretto255::hash_to_point_base(), + zkrp_new_balance, + BULLETPROOFS_NUM_BITS, + BULLETPROOFS_DST + ), + error::out_of_range(ERANGE_PROOF_VERIFICATION_FAILED) + ); + } + + /// Verifies the validity of the `TransferBalanceRangeProof`. + fun verify_transfer_amount_range_proof( + transfer_amount: &confidential_balance::ConfidentialBalance, + zkrp_transfer_amount: &RangeProof) + { + let balance_c = confidential_balance::balance_to_points_c(transfer_amount); + + assert!( + bulletproofs::verify_batch_range_proof( + &balance_c, + &ristretto255::basepoint(), + &ristretto255::hash_to_point_base(), + zkrp_transfer_amount, + BULLETPROOFS_NUM_BITS, + BULLETPROOFS_DST + ), + error::out_of_range(ERANGE_PROOF_VERIFICATION_FAILED) + ); + } + + // + // Friend public functions + // + + /// Returns the number of range proofs in the provided `WithdrawalProof`. + /// Used in the `confidential_asset` module to validate input parameters of the `confidential_transfer` function. + public(friend) fun auditors_count_in_transfer_proof(proof: &TransferProof): u64 { + proof.sigma_proof.xs.x7s.length() + } + + // + // Deserialization functions + // + + /// Deserializes the `WithdrawalProof` from the byte array. + /// Returns `Some(WithdrawalProof)` if the deserialization is successful; otherwise, returns `None`. + public fun deserialize_withdrawal_proof( + sigma_proof_bytes: vector, + zkrp_new_balance_bytes: vector): Option + { + let sigma_proof = deserialize_withdrawal_sigma_proof(sigma_proof_bytes); + let zkrp_new_balance = bulletproofs::range_proof_from_bytes(zkrp_new_balance_bytes); + + if (sigma_proof.is_none()) { + return option::none() + }; + + option::some( + WithdrawalProof { + sigma_proof: sigma_proof.extract(), + zkrp_new_balance, + } + ) + } + + /// Deserializes the `TransferProof` from the byte array. + /// Returns `Some(TransferProof)` if the deserialization is successful; otherwise, returns `None`. + public fun deserialize_transfer_proof( + sigma_proof_bytes: vector, + zkrp_new_balance_bytes: vector, + zkrp_transfer_amount_bytes: vector): Option + { + let sigma_proof = deserialize_transfer_sigma_proof(sigma_proof_bytes); + let zkrp_new_balance = bulletproofs::range_proof_from_bytes(zkrp_new_balance_bytes); + let zkrp_transfer_amount = bulletproofs::range_proof_from_bytes(zkrp_transfer_amount_bytes); + + if (sigma_proof.is_none()) { + return option::none() + }; + + option::some( + TransferProof { + sigma_proof: sigma_proof.extract(), + zkrp_new_balance, + zkrp_transfer_amount, + } + ) + } + + /// Deserializes the `NormalizationProof` from the byte array. + /// Returns `Some(NormalizationProof)` if the deserialization is successful; otherwise, returns `None`. + public fun deserialize_normalization_proof( + sigma_proof_bytes: vector, + zkrp_new_balance_bytes: vector): Option + { + let sigma_proof = deserialize_normalization_sigma_proof(sigma_proof_bytes); + let zkrp_new_balance = bulletproofs::range_proof_from_bytes(zkrp_new_balance_bytes); + + if (sigma_proof.is_none()) { + return option::none() + }; + + option::some( + NormalizationProof { + sigma_proof: sigma_proof.extract(), + zkrp_new_balance, + } + ) + } + + /// Deserializes the `RotationProof` from the byte array. 
+ /// Returns `Some(RotationProof)` if the deserialization is successful; otherwise, returns `None`. + public fun deserialize_rotation_proof( + sigma_proof_bytes: vector, + zkrp_new_balance_bytes: vector): Option + { + let sigma_proof = deserialize_rotation_sigma_proof(sigma_proof_bytes); + let zkrp_new_balance = bulletproofs::range_proof_from_bytes(zkrp_new_balance_bytes); + + if (sigma_proof.is_none()) { + return option::none() + }; + + option::some( + RotationProof { + sigma_proof: sigma_proof.extract(), + zkrp_new_balance, + } + ) + } + + // + // Deserialization functions implementations + // + + /// Deserializes the `WithdrawalSigmaProof` from the byte array. + /// Returns `Some(WithdrawalSigmaProof)` if the deserialization is successful; otherwise, returns `None`. + fun deserialize_withdrawal_sigma_proof(proof_bytes: vector): Option { + let alphas_count = 18; + let xs_count = 18; + + if (proof_bytes.length() != 32 * xs_count + 32 * alphas_count) { + return option::none() + }; + + let alphas = vector::range(0, alphas_count).map(|i| { + ristretto255::new_scalar_from_bytes(proof_bytes.slice(i * 32, (i + 1) * 32)) + }); + let xs = vector::range(alphas_count, alphas_count + xs_count).map(|i| { + ristretto255::new_compressed_point_from_bytes(proof_bytes.slice(i * 32, (i + 1) * 32)) + }); + + if (alphas.any(|alpha| alpha.is_none()) || xs.any(|x| x.is_none())) { + return option::none() + }; + + option::some( + WithdrawalSigmaProof { + alphas: WithdrawalSigmaProofAlphas { + a1s: alphas.slice(0, 8).map(|alpha| alpha.extract()), + a2: alphas[8].extract(), + a3: alphas[9].extract(), + a4s: alphas.slice(10, 18).map(|alpha| alpha.extract()), + }, + xs: WithdrawalSigmaProofXs { + x1: xs[0].extract(), + x2: xs[1].extract(), + x3s: xs.slice(2, 10).map(|x| x.extract()), + x4s: xs.slice(10, 18).map(|x| x.extract()), + }, + } + ) + } + + /// Deserializes the `TransferSigmaProof` from the byte array. + /// Returns `Some(TransferSigmaProof)` if the deserialization is successful; otherwise, returns `None`. + fun deserialize_transfer_sigma_proof(proof_bytes: vector): Option { + let alphas_count = 26; + let xs_count = 30; + + if (proof_bytes.length() < 32 * xs_count + 32 * alphas_count) { + return option::none() + }; + + // Transfer proof may contain additional four Xs for each auditor. 
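+        // Worked example (sketch): with two auditors the payload is 32 * (26 + 30) + 2 * 4 * 32
+        // = 1792 + 256 = 2048 bytes, so `auditor_xs` below is 256 and `xs_count` grows from
+        // 30 to 38.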
+ let auditor_xs = proof_bytes.length() - (32 * xs_count + 32 * alphas_count); + + if (auditor_xs % 128 != 0) { + return option::none() + }; + + xs_count += auditor_xs / 32; + + let alphas = vector::range(0, alphas_count).map(|i| { + ristretto255::new_scalar_from_bytes(proof_bytes.slice(i * 32, (i + 1) * 32)) + }); + let xs = vector::range(alphas_count, alphas_count + xs_count).map(|i| { + ristretto255::new_compressed_point_from_bytes(proof_bytes.slice(i * 32, (i + 1) * 32)) + }); + + if (alphas.any(|alpha| alpha.is_none()) || xs.any(|x| x.is_none())) { + return option::none() + }; + + option::some( + TransferSigmaProof { + alphas: TransferSigmaProofAlphas { + a1s: alphas.slice(0, 8).map(|alpha| alpha.extract()), + a2: alphas[8].extract(), + a3s: alphas.slice(9, 13).map(|alpha| alpha.extract()), + a4s: alphas.slice(13, 17).map(|alpha| alpha.extract()), + a5: alphas[17].extract(), + a6s: alphas.slice(18, 26).map(|alpha| alpha.extract()), + }, + xs: TransferSigmaProofXs { + x1: xs[0].extract(), + x2s: xs.slice(1, 9).map(|x| x.extract()), + x3s: xs.slice(9, 13).map(|x| x.extract()), + x4s: xs.slice(13, 17).map(|x| x.extract()), + x5: xs[17].extract(), + x6s: xs.slice(18, 26).map(|x| x.extract()), + x7s: vector::range_with_step(26, xs_count - 4, 4).map(|i| { + vector::range(i, i + 4).map(|j| xs[j].extract()) + }), + x8s: xs.slice(xs_count - 4, xs_count).map(|x| x.extract()), + }, + } + ) + } + + /// Deserializes the `NormalizationSigmaProof` from the byte array. + /// Returns `Some(NormalizationSigmaProof)` if the deserialization is successful; otherwise, returns `None`. + fun deserialize_normalization_sigma_proof(proof_bytes: vector): Option { + let alphas_count = 18; + let xs_count = 18; + + if (proof_bytes.length() != 32 * xs_count + 32 * alphas_count) { + return option::none() + }; + + let alphas = vector::range(0, alphas_count).map(|i| { + ristretto255::new_scalar_from_bytes(proof_bytes.slice(i * 32, (i + 1) * 32)) + }); + let xs = vector::range(alphas_count, alphas_count + xs_count).map(|i| { + ristretto255::new_compressed_point_from_bytes(proof_bytes.slice(i * 32, (i + 1) * 32)) + }); + + if (alphas.any(|alpha| alpha.is_none()) || xs.any(|x| x.is_none())) { + return option::none() + }; + + option::some( + NormalizationSigmaProof { + alphas: NormalizationSigmaProofAlphas { + a1s: alphas.slice(0, 8).map(|alpha| alpha.extract()), + a2: alphas[8].extract(), + a3: alphas[9].extract(), + a4s: alphas.slice(10, 18).map(|alpha| alpha.extract()), + }, + xs: NormalizationSigmaProofXs { + x1: xs[0].extract(), + x2: xs[1].extract(), + x3s: xs.slice(2, 10).map(|x| x.extract()), + x4s: xs.slice(10, 18).map(|x| x.extract()), + }, + } + ) + } + + /// Deserializes the `RotationSigmaProof` from the byte array. + /// Returns `Some(RotationSigmaProof)` if the deserialization is successful; otherwise, returns `None`. 
+ fun deserialize_rotation_sigma_proof(proof_bytes: vector): Option { + let alphas_count = 19; + let xs_count = 19; + + if (proof_bytes.length() != 32 * xs_count + 32 * alphas_count) { + return option::none() + }; + + let alphas = vector::range(0, alphas_count).map(|i| { + ristretto255::new_scalar_from_bytes(proof_bytes.slice(i * 32, (i + 1) * 32)) + }); + let xs = vector::range(alphas_count, alphas_count + xs_count).map(|i| { + ristretto255::new_compressed_point_from_bytes(proof_bytes.slice(i * 32, (i + 1) * 32)) + }); + + if (alphas.any(|alpha| alpha.is_none()) || xs.any(|x| x.is_none())) { + return option::none() + }; + + option::some( + RotationSigmaProof { + alphas: RotationSigmaProofAlphas { + a1s: alphas.slice(0, 8).map(|alpha| alpha.extract()), + a2: alphas[8].extract(), + a3: alphas[9].extract(), + a4: alphas[10].extract(), + a5s: alphas.slice(11, 19).map(|alpha| alpha.extract()), + }, + xs: RotationSigmaProofXs { + x1: xs[0].extract(), + x2: xs[1].extract(), + x3: xs[2].extract(), + x4s: xs.slice(3, 11).map(|x| x.extract()), + x5s: xs.slice(11, 19).map(|x| x.extract()), + }, + } + ) + } + + // + // Public view functions + // + + #[view] + /// Returns the Fiat Shamir DST for the `WithdrawalSigmaProof`. + public fun get_fiat_shamir_withdrawal_sigma_dst(): vector { + FIAT_SHAMIR_WITHDRAWAL_SIGMA_DST + } + + #[view] + /// Returns the Fiat Shamir DST for the `TransferSigmaProof`. + public fun get_fiat_shamir_transfer_sigma_dst(): vector { + FIAT_SHAMIR_TRANSFER_SIGMA_DST + } + + #[view] + /// Returns the Fiat Shamir DST for the `NormalizationSigmaProof`. + public fun get_fiat_shamir_normalization_sigma_dst(): vector { + FIAT_SHAMIR_NORMALIZATION_SIGMA_DST + } + + #[view] + /// Returns the Fiat Shamir DST for the `RotationSigmaProof`. + public fun get_fiat_shamir_rotation_sigma_dst(): vector { + FIAT_SHAMIR_ROTATION_SIGMA_DST + } + + #[view] + /// Returns the DST for the range proofs. + public fun get_bulletproofs_dst(): vector { + BULLETPROOFS_DST + } + + #[view] + /// Returns the maximum number of bits of the normalized chunk for the range proofs. + public fun get_bulletproofs_num_bits(): u64 { + BULLETPROOFS_NUM_BITS + } + + // + // Private functions for Fiat-Shamir challenge derivation. + // The Fiat Shamir is used to make the proofs non-interactive. + // The challenge has the same for the proof generation and verification and is derived from the public parameters. + // + + /// Derives the Fiat-Shamir challenge for the `WithdrawalSigmaProof`. 
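+
+    // Sketch of how the challenge is produced: each helper below assembles the transcript in
+    // a fixed order (domain-separation tag, the generators G and H, the public keys, the
+    // public ciphertexts, and finally the proof's X commitments) and feeds it to
+    // `ristretto255::new_scalar_from_sha2_512`, which reduces the SHA2-512 digest of the
+    // input modulo the group order. Prover and verifier must serialize the exact same
+    // transcript in the exact same order; any deviation yields a different `rho` and the
+    // aggregated MSM check above fails.
+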
+ fun fiat_shamir_withdrawal_sigma_proof_challenge( + ek: &twisted_elgamal::CompressedPubkey, + amount_chunks: &vector, + current_balance: &confidential_balance::ConfidentialBalance, + proof_xs: &WithdrawalSigmaProofXs): Scalar + { + // rho = H(DST, G, H, P, v_{1..4}, (C_cur, D_cur)_{1..8}, X_{1..18}) + let bytes = FIAT_SHAMIR_WITHDRAWAL_SIGMA_DST; + + bytes.append(ristretto255::compressed_point_to_bytes(ristretto255::basepoint_compressed())); + bytes.append( + ristretto255::compressed_point_to_bytes(ristretto255::point_compress(&ristretto255::hash_to_point_base())) + ); + bytes.append(twisted_elgamal::pubkey_to_bytes(ek)); + amount_chunks.for_each_ref(|chunk| { + bytes.append(ristretto255::scalar_to_bytes(chunk)); + }); + bytes.append(confidential_balance::balance_to_bytes(current_balance)); + bytes.append(ristretto255::point_to_bytes(&proof_xs.x1)); + bytes.append(ristretto255::point_to_bytes(&proof_xs.x2)); + proof_xs.x3s.for_each_ref(|x| { + bytes.append(ristretto255::point_to_bytes(x)); + }); + proof_xs.x4s.for_each_ref(|x| { + bytes.append(ristretto255::point_to_bytes(x)); + }); + + ristretto255::new_scalar_from_sha2_512(bytes) + } + + /// Derives the Fiat-Shamir challenge for the `TransferSigmaProof`. + fun fiat_shamir_transfer_sigma_proof_challenge( + sender_ek: &twisted_elgamal::CompressedPubkey, + recipient_ek: &twisted_elgamal::CompressedPubkey, + current_balance: &confidential_balance::ConfidentialBalance, + new_balance: &confidential_balance::ConfidentialBalance, + sender_amount: &confidential_balance::ConfidentialBalance, + recipient_amount: &confidential_balance::ConfidentialBalance, + auditor_eks: &vector, + auditor_amounts: &vector, + proof_xs: &TransferSigmaProofXs): Scalar + { + // rho = H(DST, G, H, P_s, P_r, P_a_{1..n}, (C_cur, D_cur)_{1..8}, (C_v, D_v)_{1..4}, D_a_{1..4n}, D_s_{1..4}, (C_new, D_new)_{1..8}, X_{1..30 + 4n}) + let bytes = FIAT_SHAMIR_TRANSFER_SIGMA_DST; + + bytes.append(ristretto255::compressed_point_to_bytes(ristretto255::basepoint_compressed())); + bytes.append( + ristretto255::compressed_point_to_bytes(ristretto255::point_compress(&ristretto255::hash_to_point_base())) + ); + bytes.append(twisted_elgamal::pubkey_to_bytes(sender_ek)); + bytes.append(twisted_elgamal::pubkey_to_bytes(recipient_ek)); + auditor_eks.for_each_ref(|ek| { + bytes.append(twisted_elgamal::pubkey_to_bytes(ek)); + }); + bytes.append(confidential_balance::balance_to_bytes(current_balance)); + bytes.append(confidential_balance::balance_to_bytes(recipient_amount)); + auditor_amounts.for_each_ref(|balance| { + confidential_balance::balance_to_points_d(balance).for_each_ref(|d| { + bytes.append(ristretto255::compressed_point_to_bytes(ristretto255::point_compress(d))); + }); + }); + confidential_balance::balance_to_points_d(sender_amount).for_each_ref(|d| { + bytes.append(ristretto255::compressed_point_to_bytes(ristretto255::point_compress(d))); + }); + bytes.append(confidential_balance::balance_to_bytes(new_balance)); + bytes.append(ristretto255::point_to_bytes(&proof_xs.x1)); + proof_xs.x2s.for_each_ref(|x| { + bytes.append(ristretto255::point_to_bytes(x)); + }); + proof_xs.x3s.for_each_ref(|x| { + bytes.append(ristretto255::point_to_bytes(x)); + }); + proof_xs.x4s.for_each_ref(|x| { + bytes.append(ristretto255::point_to_bytes(x)); + }); + bytes.append(ristretto255::point_to_bytes(&proof_xs.x5)); + proof_xs.x6s.for_each_ref(|x| { + bytes.append(ristretto255::point_to_bytes(x)); + }); + proof_xs.x7s.for_each_ref(|xs| { + xs.for_each_ref(|x| { + 
bytes.append(ristretto255::point_to_bytes(x)); + }); + }); + proof_xs.x8s.for_each_ref(|x| { + bytes.append(ristretto255::point_to_bytes(x)); + }); + + ristretto255::new_scalar_from_sha2_512(bytes) + } + + /// Derives the Fiat-Shamir challenge for the `NormalizationSigmaProof`. + fun fiat_shamir_normalization_sigma_proof_challenge( + ek: &twisted_elgamal::CompressedPubkey, + current_balance: &confidential_balance::ConfidentialBalance, + new_balance: &confidential_balance::ConfidentialBalance, + proof_xs: &NormalizationSigmaProofXs): Scalar + { + // rho = H(DST, G, H, P, (C_cur, D_cur)_{1..8}, (C_new, D_new)_{1..8}, X_{1..18}) + let bytes = FIAT_SHAMIR_NORMALIZATION_SIGMA_DST; + + bytes.append(ristretto255::compressed_point_to_bytes(ristretto255::basepoint_compressed())); + bytes.append( + ristretto255::compressed_point_to_bytes(ristretto255::point_compress(&ristretto255::hash_to_point_base())) + ); + bytes.append(twisted_elgamal::pubkey_to_bytes(ek)); + bytes.append(confidential_balance::balance_to_bytes(current_balance)); + bytes.append(confidential_balance::balance_to_bytes(new_balance)); + bytes.append(ristretto255::point_to_bytes(&proof_xs.x1)); + bytes.append(ristretto255::point_to_bytes(&proof_xs.x2)); + proof_xs.x3s.for_each_ref(|x| { + bytes.append(ristretto255::point_to_bytes(x)); + }); + proof_xs.x4s.for_each_ref(|x| { + bytes.append(ristretto255::point_to_bytes(x)); + }); + + ristretto255::new_scalar_from_sha2_512(bytes) + } + + /// Derives the Fiat-Shamir challenge for the `RotationSigmaProof`. + fun fiat_shamir_rotation_sigma_proof_challenge( + current_ek: &twisted_elgamal::CompressedPubkey, + new_ek: &twisted_elgamal::CompressedPubkey, + current_balance: &confidential_balance::ConfidentialBalance, + new_balance: &confidential_balance::ConfidentialBalance, + proof_xs: &RotationSigmaProofXs): Scalar + { + // rho = H(DST, G, H, P_cur, P_new, (C_cur, D_cur)_{1..8}, (C_new, D_new)_{1..8}, X_{1..19}) + let bytes = FIAT_SHAMIR_ROTATION_SIGMA_DST; + + bytes.append(ristretto255::compressed_point_to_bytes(ristretto255::basepoint_compressed())); + bytes.append( + ristretto255::compressed_point_to_bytes(ristretto255::point_compress(&ristretto255::hash_to_point_base())) + ); + bytes.append(twisted_elgamal::pubkey_to_bytes(current_ek)); + bytes.append(twisted_elgamal::pubkey_to_bytes(new_ek)); + bytes.append(confidential_balance::balance_to_bytes(current_balance)); + bytes.append(confidential_balance::balance_to_bytes(new_balance)); + bytes.append(ristretto255::point_to_bytes(&proof_xs.x1)); + bytes.append(ristretto255::point_to_bytes(&proof_xs.x2)); + bytes.append(ristretto255::point_to_bytes(&proof_xs.x3)); + proof_xs.x4s.for_each_ref(|x| { + bytes.append(ristretto255::point_to_bytes(x)); + }); + proof_xs.x5s.for_each_ref(|x| { + bytes.append(ristretto255::point_to_bytes(x)); + }); + + ristretto255::new_scalar_from_sha2_512(bytes) + } + + // + // Private functions for constructing the scalar multipliers (`gammas`) used for uniting multiple proof relations + // into a single multi-scalar multiplication (MSM) equation + // + + /// Returns the scalar multipliers for the `WithdrawalSigmaProof`. 
+ fun msm_withdrawal_gammas(rho: &Scalar): WithdrawalSigmaProofGammas { + WithdrawalSigmaProofGammas { + g1: ristretto255::new_scalar_from_sha2_512(msm_gamma_1(rho, 1)), + g2: ristretto255::new_scalar_from_sha2_512(msm_gamma_1(rho, 2)), + g3s: vector::range(0, 8).map(|i| { + ristretto255::new_scalar_from_sha2_512(msm_gamma_2(rho, 3, (i as u8))) + }), + g4s: vector::range(0, 8).map(|i| { + ristretto255::new_scalar_from_sha2_512(msm_gamma_2(rho, 4, (i as u8))) + }), + } + } + + /// Returns the scalar multipliers for the `TransferSigmaProof`. + fun msm_transfer_gammas(rho: &Scalar, auditors_count: u64): TransferSigmaProofGammas { + TransferSigmaProofGammas { + g1: ristretto255::new_scalar_from_sha2_512(msm_gamma_1(rho, 1)), + g2s: vector::range(0, 8).map(|i| { + ristretto255::new_scalar_from_sha2_512(msm_gamma_2(rho, 2, (i as u8))) + }), + g3s: vector::range(0, 4).map(|i| { + ristretto255::new_scalar_from_sha2_512(msm_gamma_2(rho, 3, (i as u8))) + }), + g4s: vector::range(0, 4).map(|i| { + ristretto255::new_scalar_from_sha2_512(msm_gamma_2(rho, 4, (i as u8))) + }), + g5: ristretto255::new_scalar_from_sha2_512(msm_gamma_1(rho, 5)), + g6s: vector::range(0, 8).map(|i| { + ristretto255::new_scalar_from_sha2_512(msm_gamma_2(rho, 6, (i as u8))) + }), + g7s: vector::range(0, auditors_count).map(|i| { + vector::range(0, 4).map(|j| { + ristretto255::new_scalar_from_sha2_512(msm_gamma_2(rho, (i + 7 as u8), (j as u8))) + }) + }), + g8s: vector::range(0, 4).map(|i| { + ristretto255::new_scalar_from_sha2_512(msm_gamma_2(rho, 8, (i as u8))) + }), + } + } + + /// Returns the scalar multipliers for the `NormalizationSigmaProof`. + fun msm_normalization_gammas(rho: &Scalar): NormalizationSigmaProofGammas { + NormalizationSigmaProofGammas { + g1: ristretto255::new_scalar_from_sha2_512(msm_gamma_1(rho, 1)), + g2: ristretto255::new_scalar_from_sha2_512(msm_gamma_1(rho, 2)), + g3s: vector::range(0, 8).map(|i| { + ristretto255::new_scalar_from_sha2_512(msm_gamma_2(rho, 3, (i as u8))) + }), + g4s: vector::range(0, 8).map(|i| { + ristretto255::new_scalar_from_sha2_512(msm_gamma_2(rho, 4, (i as u8))) + }), + } + } + + /// Returns the scalar multipliers for the `RotationSigmaProof`. + fun msm_rotation_gammas(rho: &Scalar): RotationSigmaProofGammas { + RotationSigmaProofGammas { + g1: ristretto255::new_scalar_from_sha2_512(msm_gamma_1(rho, 1)), + g2: ristretto255::new_scalar_from_sha2_512(msm_gamma_1(rho, 2)), + g3: ristretto255::new_scalar_from_sha2_512(msm_gamma_1(rho, 3)), + g4s: vector::range(0, 8).map(|i| { + ristretto255::new_scalar_from_sha2_512(msm_gamma_2(rho, 4, (i as u8))) + }), + g5s: vector::range(0, 8).map(|i| { + ristretto255::new_scalar_from_sha2_512(msm_gamma_2(rho, 5, (i as u8))) + }), + } + } + + /// Returns the scalar multiplier computed as a hash of the provided `rho` and corresponding `gamma` index. + fun msm_gamma_1(rho: &Scalar, i: u8): vector { + let bytes = ristretto255::scalar_to_bytes(rho); + bytes.push_back(i); + bytes + } + + /// Returns the scalar multiplier computed as a hash of the provided `rho` and corresponding `gamma` indices. + fun msm_gamma_2(rho: &Scalar, i: u8, j: u8): vector { + let bytes = ristretto255::scalar_to_bytes(rho); + bytes.push_back(i); + bytes.push_back(j); + bytes + } + + /// Calculates the product of the provided scalars. 
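+
+    // Worked example (test-only sketch) of the helpers below: `scalar_linear_combination(a, b)`
+    // computes sum_i a[i] * b[i], e.g. [2, 3] . [4, 5] = 2 * 4 + 3 * 5 = 23, and
+    // `new_scalar_from_pow2(i * 16)` supplies the 2^(16 * i) weights used to reassemble a
+    // chunked amount inside the verifiers above.
+    #[test_only]
+    fun example_scalar_linear_combination() {
+        let lhs = vector[ristretto255::new_scalar_from_u64(2), ristretto255::new_scalar_from_u64(3)];
+        let rhs = vector[ristretto255::new_scalar_from_u64(4), ristretto255::new_scalar_from_u64(5)];
+        assert!(
+            ristretto255::scalar_to_bytes(&scalar_linear_combination(&lhs, &rhs))
+                == ristretto255::scalar_to_bytes(&ristretto255::new_scalar_from_u64(23)),
+            0
+        );
+    }
+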
+ fun scalar_mul_3(scalar1: &Scalar, scalar2: &Scalar, scalar3: &Scalar): Scalar { + let result = *scalar1; + + ristretto255::scalar_mul_assign(&mut result, scalar2); + ristretto255::scalar_mul_assign(&mut result, scalar3); + + result + } + + /// Calculates the linear combination of the provided scalars. + fun scalar_linear_combination(lhs: &vector, rhs: &vector): Scalar { + let result = ristretto255::scalar_zero(); + + lhs.zip_ref(rhs, |l, r| { + ristretto255::scalar_add_assign(&mut result, &ristretto255::scalar_mul(l, r)); + }); + + result + } + + /// Raises 2 to the power of the provided exponent and returns the result as a scalar. + fun new_scalar_from_pow2(exp: u64): Scalar { + ristretto255::new_scalar_from_u128(1 << (exp as u8)) + } + + // + // Test-only structs + // + + #[test_only] struct WithdrawalSigmaProofRandomness has drop { + x1s: vector, + x2: Scalar, + x3: Scalar, + x4s: vector, + } + + #[test_only] struct TransferSigmaProofRandomness has drop { + x1s: vector, + x2: Scalar, + x3s: vector, + x4s: vector, + x5: Scalar, + x6s: vector, + } + + #[test_only] struct NormalizationSigmaProofRandomness has drop { + x1s: vector, + x2: Scalar, + x3: Scalar, + x4s: vector, + } + + #[test_only] struct RotationSigmaProofRandomness has drop { + x1s: vector, + x2: Scalar, + x3: Scalar, + x4: Scalar, + x5s: vector, + } + + // + // Test-only prove functions + // + + #[test_only] + public fun prove_withdrawal( + dk: &Scalar, + ek: &twisted_elgamal::CompressedPubkey, + amount: u64, + new_amount: u128, + current_balance: &confidential_balance::ConfidentialBalance + ): (WithdrawalProof, confidential_balance::ConfidentialBalance) + { + let new_balance_r = confidential_balance::generate_balance_randomness(); + let new_balance = confidential_balance::new_actual_balance_from_u128(new_amount, &new_balance_r, ek); + + let new_balance_r = confidential_balance::balance_randomness_as_scalars(&new_balance_r); + + let sigma_r = generate_withdrawal_sigma_proof_randomness(); + + let zkrp_new_balance = prove_new_balance_range(new_amount, new_balance_r); + + let x1 = ristretto255::basepoint_mul( + &scalar_linear_combination(&sigma_r.x1s, &vector::range(0, 8).map(|i| new_scalar_from_pow2(i * 16))) + ); + ristretto255::point_add_assign( + &mut x1, + &ristretto255::point_mul( + &ristretto255::multi_scalar_mul( + &confidential_balance::balance_to_points_d(current_balance), + &vector::range(0, 8).map(|i| new_scalar_from_pow2(i * 16)) + ), + &sigma_r.x2 + ) + ); + + let x2 = ristretto255::point_mul(&ristretto255::hash_to_point_base(), &sigma_r.x3); + let x3s = vector::range(0, 8).map(|i| { + let x3i = ristretto255::basepoint_mul(&sigma_r.x1s[i]); + ristretto255::point_add_assign( + &mut x3i, + &ristretto255::point_mul(&ristretto255::hash_to_point_base(), &sigma_r.x4s[i]) + ); + x3i + }); + let x4s = vector::range(0, 8).map(|i| { + ristretto255::point_mul(&twisted_elgamal::pubkey_to_point(ek), &sigma_r.x4s[i]) + }); + + let proof_xs = WithdrawalSigmaProofXs { + x1: ristretto255::point_compress(&x1), + x2: ristretto255::point_compress(&x2), + x3s: x3s.map(|x| ristretto255::point_compress(&x)), + x4s: x4s.map(|x| ristretto255::point_compress(&x)), + }; + + let amount_chunks = confidential_balance::split_into_chunks_u64(amount); + + let rho = fiat_shamir_withdrawal_sigma_proof_challenge(ek, &amount_chunks, current_balance, &proof_xs); + + let new_amount_chunks = confidential_balance::split_into_chunks_u128(new_amount); + + let a1s = vector::range(0, 8).map(|i| { + ristretto255::scalar_sub(&sigma_r.x1s[i], 
&ristretto255::scalar_mul(&rho, &new_amount_chunks[i])) + }); + let a2 = ristretto255::scalar_sub(&sigma_r.x2, &ristretto255::scalar_mul(&rho, dk)); + let a3 = ristretto255::scalar_sub( + &sigma_r.x3, + &ristretto255::scalar_mul(&rho, &ristretto255::scalar_invert(dk).extract()) + ); + let a4s = vector::range(0, 8).map(|i| { + ristretto255::scalar_sub(&sigma_r.x4s[i], &ristretto255::scalar_mul(&rho, &new_balance_r[i])) + }); + + ( + WithdrawalProof { + sigma_proof: WithdrawalSigmaProof { + xs: proof_xs, + alphas: WithdrawalSigmaProofAlphas { a1s, a2, a3, a4s } + }, + zkrp_new_balance + }, + new_balance + ) + } + + #[test_only] + public fun prove_transfer( + sender_dk: &Scalar, + sender_ek: &twisted_elgamal::CompressedPubkey, + recipient_ek: &twisted_elgamal::CompressedPubkey, + amount: u64, + new_amount: u128, + current_balance: &confidential_balance::ConfidentialBalance, + auditor_eks: &vector + ): ( + TransferProof, + confidential_balance::ConfidentialBalance, + confidential_balance::ConfidentialBalance, + confidential_balance::ConfidentialBalance, + vector + ) + { + let amount_r = confidential_balance::generate_balance_randomness(); + let new_balance_r = confidential_balance::generate_balance_randomness(); + + let new_balance = confidential_balance::new_actual_balance_from_u128(new_amount, &new_balance_r, sender_ek); + + let sender_amount = confidential_balance::new_pending_balance_from_u64(amount, &amount_r, sender_ek); + let recipient_amount = confidential_balance::new_pending_balance_from_u64( + amount, + &amount_r, + recipient_ek + ); + let auditor_amounts = auditor_eks.map_ref(|ek| { + confidential_balance::new_pending_balance_from_u64(amount, &amount_r, ek) + }); + + let amount_r = confidential_balance::balance_randomness_as_scalars(&amount_r).slice(0, 4); + let new_balance_r = confidential_balance::balance_randomness_as_scalars(&new_balance_r); + + let sigma_r = generate_transfer_sigma_proof_randomness(); + + let zkrp_new_balance = prove_new_balance_range(new_amount, new_balance_r); + let zkrp_transfer_amount = prove_transfer_amount_range(amount, &amount_r); + + let x1 = ristretto255::basepoint_mul( + &scalar_linear_combination(&sigma_r.x1s, &vector::range(0, 8).map(|i| new_scalar_from_pow2(i * 16))) + ); + + ristretto255::point_add_assign( + &mut x1, + &ristretto255::point_mul( + &ristretto255::hash_to_point_base(), + &ristretto255::scalar_sub( + &scalar_linear_combination( + &sigma_r.x6s, + &vector::range(0, 8).map(|i| new_scalar_from_pow2(i * 16)) + ), + &scalar_linear_combination(&sigma_r.x3s, &vector::range(0, 4).map(|i| new_scalar_from_pow2(i * 16))) + ) + ), + ); + + let current_balance_d = confidential_balance::balance_to_points_d(current_balance); + let new_balance_d = confidential_balance::balance_to_points_d(&new_balance); + + vector::range(0, 8).for_each(|i| { + ristretto255::point_add_assign( + &mut x1, + &ristretto255::point_mul( + ¤t_balance_d[i], + &ristretto255::scalar_mul(&sigma_r.x2, &new_scalar_from_pow2(i * 16)) + ), + ); + }); + vector::range(0, 8).for_each(|i| { + ristretto255::point_sub_assign( + &mut x1, + &ristretto255::point_mul( + &new_balance_d[i], + &ristretto255::scalar_mul(&sigma_r.x2, &new_scalar_from_pow2(i * 16)) + ), + ); + }); + + let x2s = vector::range(0, 8).map(|i| { + ristretto255::point_mul(&twisted_elgamal::pubkey_to_point(sender_ek), &sigma_r.x6s[i]) + }); + let x3s = vector::range(0, 4).map(|i| { + ristretto255::point_mul(&twisted_elgamal::pubkey_to_point(recipient_ek), &sigma_r.x3s[i]) + }); + let x4s = vector::range(0, 4).map(|i| { + 
let x4i = ristretto255::basepoint_mul(&sigma_r.x4s[i]); + ristretto255::point_add_assign( + &mut x4i, + &ristretto255::point_mul(&ristretto255::hash_to_point_base(), &sigma_r.x3s[i]) + ); + x4i + }); + let x5 = ristretto255::point_mul(&ristretto255::hash_to_point_base(), &sigma_r.x5); + let x6s = vector::range(0, 8).map(|i| { + let x6i = ristretto255::basepoint_mul(&sigma_r.x1s[i]); + ristretto255::point_add_assign( + &mut x6i, + &ristretto255::point_mul(&ristretto255::hash_to_point_base(), &sigma_r.x6s[i]) + ); + x6i + }); + let x7s = auditor_eks.map_ref(|ek| { + vector::range(0, 4).map(|i| { + ristretto255::point_mul(&twisted_elgamal::pubkey_to_point(ek), &sigma_r.x3s[i]) + }) + }); + let x8s = vector::range(0, 4).map(|i| { + ristretto255::point_mul(&twisted_elgamal::pubkey_to_point(sender_ek), &sigma_r.x3s[i]) + }); + + let proof_xs = TransferSigmaProofXs { + x1: ristretto255::point_compress(&x1), + x2s: x2s.map(|x| ristretto255::point_compress(&x)), + x3s: x3s.map(|x| ristretto255::point_compress(&x)), + x4s: x4s.map(|x| ristretto255::point_compress(&x)), + x5: ristretto255::point_compress(&x5), + x6s: x6s.map(|x| ristretto255::point_compress(&x)), + x7s: x7s.map(|xs| { + xs.map(|x| ristretto255::point_compress(&x)) + }), + x8s: x8s.map(|x| ristretto255::point_compress(&x)), + }; + + let rho = fiat_shamir_transfer_sigma_proof_challenge( + sender_ek, + recipient_ek, + current_balance, + &new_balance, + &sender_amount, + &recipient_amount, + auditor_eks, + &auditor_amounts, + &proof_xs + ); + + let amount_chunks = confidential_balance::split_into_chunks_u64(amount); + let new_amount_chunks = confidential_balance::split_into_chunks_u128(new_amount); + + let a1s = vector::range(0, 8).map(|i| { + ristretto255::scalar_sub(&sigma_r.x1s[i], &ristretto255::scalar_mul(&rho, &new_amount_chunks[i])) + }); + let a2 = ristretto255::scalar_sub(&sigma_r.x2, &ristretto255::scalar_mul(&rho, sender_dk)); + let a3s = vector::range(0, 4).map(|i| { + ristretto255::scalar_sub(&sigma_r.x3s[i], &ristretto255::scalar_mul(&rho, &amount_r[i])) + }); + let a4s = vector::range(0, 4).map(|i| { + ristretto255::scalar_sub(&sigma_r.x4s[i], &ristretto255::scalar_mul(&rho, &amount_chunks[i])) + }); + let a5 = ristretto255::scalar_sub( + &sigma_r.x5, + &ristretto255::scalar_mul(&rho, &ristretto255::scalar_invert(sender_dk).extract()) + ); + let a6s = vector::range(0, 8).map(|i| { + ristretto255::scalar_sub(&sigma_r.x6s[i], &ristretto255::scalar_mul(&rho, &new_balance_r[i])) + }); + + ( + TransferProof { + sigma_proof: TransferSigmaProof { + xs: proof_xs, + alphas: TransferSigmaProofAlphas { a1s, a2, a3s, a4s, a5, a6s } + }, + zkrp_new_balance, + zkrp_transfer_amount + }, + new_balance, + sender_amount, + recipient_amount, + auditor_amounts, + ) + } + + #[test_only] + public fun prove_normalization( + dk: &Scalar, + ek: &twisted_elgamal::CompressedPubkey, + amount: u128, + current_balance: &confidential_balance::ConfidentialBalance + ): (NormalizationProof, confidential_balance::ConfidentialBalance) + { + let new_balance_r = confidential_balance::generate_balance_randomness(); + let new_balance = confidential_balance::new_actual_balance_from_u128(amount, &new_balance_r, ek); + + let new_balance_r = confidential_balance::balance_randomness_as_scalars(&new_balance_r); + + let sigma_r = generate_normalization_sigma_proof_randomness(); + + let zkrp_new_balance = prove_new_balance_range(amount, new_balance_r); + + let x1 = ristretto255::basepoint_mul( + &scalar_linear_combination(&sigma_r.x1s, &vector::range(0, 8).map(|i| 
new_scalar_from_pow2(i * 16))) + ); + + let current_balance_d = confidential_balance::balance_to_points_d(current_balance); + + vector::range(0, 8).for_each(|i| { + ristretto255::point_add_assign( + &mut x1, + &ristretto255::point_mul( + ¤t_balance_d[i], + &ristretto255::scalar_mul(&sigma_r.x2, &new_scalar_from_pow2(i * 16)) + ) + ); + }); + + let x2 = ristretto255::point_mul(&ristretto255::hash_to_point_base(), &sigma_r.x3); + let x3s = vector::range(0, 8).map(|i| { + let x3i = ristretto255::basepoint_mul(&sigma_r.x1s[i]); + ristretto255::point_add_assign( + &mut x3i, + &ristretto255::point_mul(&ristretto255::hash_to_point_base(), &sigma_r.x4s[i]) + ); + x3i + }); + let x4s = vector::range(0, 8).map(|i| { + ristretto255::point_mul(&twisted_elgamal::pubkey_to_point(ek), &sigma_r.x4s[i]) + }); + + let proof_xs = NormalizationSigmaProofXs { + x1: ristretto255::point_compress(&x1), + x2: ristretto255::point_compress(&x2), + x3s: x3s.map(|x| ristretto255::point_compress(&x)), + x4s: x4s.map(|x| ristretto255::point_compress(&x)), + }; + + let rho = fiat_shamir_normalization_sigma_proof_challenge( + ek, + current_balance, + &new_balance, + &proof_xs + ); + + let amount_chunks = confidential_balance::split_into_chunks_u128(amount); + + let a1s = vector::range(0, 8).map(|i| { + ristretto255::scalar_sub(&sigma_r.x1s[i], &ristretto255::scalar_mul(&rho, &amount_chunks[i])) + }); + let a2 = ristretto255::scalar_sub(&sigma_r.x2, &ristretto255::scalar_mul(&rho, dk)); + let a3 = ristretto255::scalar_sub( + &sigma_r.x3, + &ristretto255::scalar_mul(&rho, &ristretto255::scalar_invert(dk).extract()) + ); + let a4s = vector::range(0, 8).map(|i| { + ristretto255::scalar_sub(&sigma_r.x4s[i], &ristretto255::scalar_mul(&rho, &new_balance_r[i])) + }); + + ( + NormalizationProof { + sigma_proof: NormalizationSigmaProof { + xs: proof_xs, + alphas: NormalizationSigmaProofAlphas { a1s, a2, a3, a4s } + }, + zkrp_new_balance + }, + new_balance + ) + } + + #[test_only] + public fun prove_rotation( + current_dk: &Scalar, + new_dk: &Scalar, + current_ek: &twisted_elgamal::CompressedPubkey, + new_ek: &twisted_elgamal::CompressedPubkey, + amount: u128, + current_balance: &confidential_balance::ConfidentialBalance + ): (RotationProof, confidential_balance::ConfidentialBalance) + { + let new_balance_r = confidential_balance::generate_balance_randomness(); + let new_balance = confidential_balance::new_actual_balance_from_u128(amount, &new_balance_r, new_ek); + + let new_balance_r = confidential_balance::balance_randomness_as_scalars(&new_balance_r); + + let sigma_r = generate_rotation_sigma_proof_randomness(); + + let zkrp_new_balance = prove_new_balance_range(amount, new_balance_r); + + let x1 = ristretto255::basepoint_mul( + &scalar_linear_combination(&sigma_r.x1s, &vector::range(0, 8).map(|i| new_scalar_from_pow2(i * 16))) + ); + let current_balance_d = confidential_balance::balance_to_points_d(current_balance); + + vector::range(0, 8).for_each(|i| { + ristretto255::point_add_assign( + &mut x1, + &ristretto255::point_mul( + ¤t_balance_d[i], + &ristretto255::scalar_mul(&sigma_r.x2, &new_scalar_from_pow2(i * 16)) + ) + ); + }); + + let x2 = ristretto255::point_mul(&ristretto255::hash_to_point_base(), &sigma_r.x3); + let x3 = ristretto255::point_mul(&ristretto255::hash_to_point_base(), &sigma_r.x4); + let x4s = vector::range(0, 8).map(|i| { + let x4i = ristretto255::basepoint_mul(&sigma_r.x1s[i]); + ristretto255::point_add_assign( + &mut x4i, + &ristretto255::point_mul(&ristretto255::hash_to_point_base(), &sigma_r.x5s[i]) + ); 
+ x4i + }); + let x5s = vector::range(0, 8).map(|i| { + ristretto255::point_mul(&twisted_elgamal::pubkey_to_point(new_ek), &sigma_r.x5s[i]) + }); + + let proof_xs = RotationSigmaProofXs { + x1: ristretto255::point_compress(&x1), + x2: ristretto255::point_compress(&x2), + x3: ristretto255::point_compress(&x3), + x4s: x4s.map(|x| ristretto255::point_compress(&x)), + x5s: x5s.map(|x| ristretto255::point_compress(&x)), + }; + + let rho = fiat_shamir_rotation_sigma_proof_challenge( + current_ek, + new_ek, + current_balance, + &new_balance, + &proof_xs + ); + + let amount_chunks = confidential_balance::split_into_chunks_u128(amount); + + let a1s = vector::range(0, 8).map(|i| { + ristretto255::scalar_sub(&sigma_r.x1s[i], &ristretto255::scalar_mul(&rho, &amount_chunks[i])) + }); + let a2 = ristretto255::scalar_sub(&sigma_r.x2, &ristretto255::scalar_mul(&rho, current_dk)); + let a3 = ristretto255::scalar_sub( + &sigma_r.x3, + &ristretto255::scalar_mul(&rho, &ristretto255::scalar_invert(current_dk).extract()) + ); + let a4 = ristretto255::scalar_sub( + &sigma_r.x4, + &ristretto255::scalar_mul(&rho, &ristretto255::scalar_invert(new_dk).extract()) + ); + let a5s = vector::range(0, 8).map(|i| { + ristretto255::scalar_sub(&sigma_r.x5s[i], &ristretto255::scalar_mul(&rho, &new_balance_r[i])) + }); + + ( + RotationProof { + sigma_proof: RotationSigmaProof { + xs: proof_xs, + alphas: RotationSigmaProofAlphas { a1s, a2, a3, a4, a5s } + }, + zkrp_new_balance + }, + new_balance + ) + } + + // + // Test-only serialization functions + // + + #[test_only] + public fun serialize_withdrawal_proof(proof: &WithdrawalProof): (vector, vector) { + ( + serialize_withdrawal_sigma_proof(&proof.sigma_proof), + bulletproofs::range_proof_to_bytes(&proof.zkrp_new_balance) + ) + } + + #[test_only] + public fun serialize_transfer_proof(proof: &TransferProof): (vector, vector, vector) { + ( + serialize_transfer_sigma_proof(&proof.sigma_proof), + bulletproofs::range_proof_to_bytes(&proof.zkrp_new_balance), + bulletproofs::range_proof_to_bytes(&proof.zkrp_transfer_amount) + ) + } + + #[test_only] + public fun serialize_normalization_proof(proof: &NormalizationProof): (vector, vector) { + ( + serialize_normalization_sigma_proof(&proof.sigma_proof), + bulletproofs::range_proof_to_bytes(&proof.zkrp_new_balance) + ) + } + + #[test_only] + public fun serialize_rotation_proof(proof: &RotationProof): (vector, vector) { + ( + serialize_rotation_sigma_proof(&proof.sigma_proof), + bulletproofs::range_proof_to_bytes(&proof.zkrp_new_balance) + ) + } + + #[test_only] fun serialize_withdrawal_sigma_proof(proof: &WithdrawalSigmaProof): vector { + let bytes = vector[]; + + proof.alphas.a1s.for_each_ref(|alpha| { + bytes.append(ristretto255::scalar_to_bytes(alpha)); + }); + bytes.append(ristretto255::scalar_to_bytes(&proof.alphas.a2)); + bytes.append(ristretto255::scalar_to_bytes(&proof.alphas.a3)); + proof.alphas.a4s.for_each_ref(|alpha| { + bytes.append(ristretto255::scalar_to_bytes(alpha)); + }); + + bytes.append(ristretto255::point_to_bytes(&proof.xs.x1)); + bytes.append(ristretto255::point_to_bytes(&proof.xs.x2)); + proof.xs.x3s.for_each_ref(|x| { + bytes.append(ristretto255::point_to_bytes(x)); + }); + proof.xs.x4s.for_each_ref(|x| { + bytes.append(ristretto255::point_to_bytes(x)); + }); + + bytes + } + + #[test_only] fun serialize_transfer_sigma_proof(proof: &TransferSigmaProof): vector { + let bytes = vector[]; + + proof.alphas.a1s.for_each_ref(|alpha| { + bytes.append(ristretto255::scalar_to_bytes(alpha)); + }); + 
bytes.append(ristretto255::scalar_to_bytes(&proof.alphas.a2)); + proof.alphas.a3s.for_each_ref(|alpha| { + bytes.append(ristretto255::scalar_to_bytes(alpha)); + }); + proof.alphas.a4s.for_each_ref(|alpha| { + bytes.append(ristretto255::scalar_to_bytes(alpha)); + }); + bytes.append(ristretto255::scalar_to_bytes(&proof.alphas.a5)); + proof.alphas.a6s.for_each_ref(|alpha| { + bytes.append(ristretto255::scalar_to_bytes(alpha)); + }); + + bytes.append(ristretto255::point_to_bytes(&proof.xs.x1)); + proof.xs.x2s.for_each_ref(|x| { + bytes.append(ristretto255::point_to_bytes(x)); + }); + proof.xs.x3s.for_each_ref(|x| { + bytes.append(ristretto255::point_to_bytes(x)); + }); + proof.xs.x4s.for_each_ref(|x| { + bytes.append(ristretto255::point_to_bytes(x)); + }); + bytes.append(ristretto255::point_to_bytes(&proof.xs.x5)); + proof.xs.x6s.for_each_ref(|x| { + bytes.append(ristretto255::point_to_bytes(x)); + }); + proof.xs.x7s.for_each_ref(|xs| { + xs.for_each_ref(|x| { + bytes.append(ristretto255::point_to_bytes(x)); + }); + }); + proof.xs.x8s.for_each_ref(|x| { + bytes.append(ristretto255::point_to_bytes(x)); + }); + + bytes + } + + #[test_only] fun serialize_normalization_sigma_proof(proof: &NormalizationSigmaProof): vector { + let bytes = vector[]; + + proof.alphas.a1s.for_each_ref(|alpha| { + bytes.append(ristretto255::scalar_to_bytes(alpha)); + }); + bytes.append(ristretto255::scalar_to_bytes(&proof.alphas.a2)); + bytes.append(ristretto255::scalar_to_bytes(&proof.alphas.a3)); + proof.alphas.a4s.for_each_ref(|alpha| { + bytes.append(ristretto255::scalar_to_bytes(alpha)); + }); + + bytes.append(ristretto255::point_to_bytes(&proof.xs.x1)); + bytes.append(ristretto255::point_to_bytes(&proof.xs.x2)); + proof.xs.x3s.for_each_ref(|x| { + bytes.append(ristretto255::point_to_bytes(x)); + }); + proof.xs.x4s.for_each_ref(|x| { + bytes.append(ristretto255::point_to_bytes(x)); + }); + + bytes + } + + #[test_only] fun serialize_rotation_sigma_proof(proof: &RotationSigmaProof): vector { + let bytes = vector[]; + + proof.alphas.a1s.for_each_ref(|alpha| { + bytes.append(ristretto255::scalar_to_bytes(alpha)); + }); + bytes.append(ristretto255::scalar_to_bytes(&proof.alphas.a2)); + bytes.append(ristretto255::scalar_to_bytes(&proof.alphas.a3)); + bytes.append(ristretto255::scalar_to_bytes(&proof.alphas.a4)); + proof.alphas.a5s.for_each_ref(|alpha| { + bytes.append(ristretto255::scalar_to_bytes(alpha)); + }); + + bytes.append(ristretto255::point_to_bytes(&proof.xs.x1)); + bytes.append(ristretto255::point_to_bytes(&proof.xs.x2)); + bytes.append(ristretto255::point_to_bytes(&proof.xs.x3)); + proof.xs.x4s.for_each_ref(|x| { + bytes.append(ristretto255::point_to_bytes(x)); + }); + proof.xs.x5s.for_each_ref(|x| { + bytes.append(ristretto255::point_to_bytes(x)); + }); + + bytes + } + + // + // Test-only private functions + // + + #[test_only] + fun prove_new_balance_range(new_amount: u128, randomness: &vector): RangeProof { + let new_amount_chunks = confidential_balance::split_into_chunks_u128(new_amount); + + let (proof, _) = bulletproofs::prove_batch_range_pedersen( + &new_amount_chunks, + randomness, + BULLETPROOFS_NUM_BITS, + BULLETPROOFS_DST); + proof + } + + #[test_only] + fun prove_transfer_amount_range(amount: u64, randomness: &vector): RangeProof { + let amount_chunks = confidential_balance::split_into_chunks_u64(amount); + + let (proof, _) = bulletproofs::prove_batch_range_pedersen( + &amount_chunks, + randomness, + BULLETPROOFS_NUM_BITS, + BULLETPROOFS_DST); + proof + } + + #[test_only] fun 
generate_withdrawal_sigma_proof_randomness(): WithdrawalSigmaProofRandomness { + WithdrawalSigmaProofRandomness { + x1s: vector::range(0, 8).map(|_| ristretto255::random_scalar()), + x2: ristretto255::random_scalar(), + x3: ristretto255::random_scalar(), + x4s: vector::range(0, 8).map(|_| ristretto255::random_scalar()), + } + } + + #[test_only] fun generate_transfer_sigma_proof_randomness(): TransferSigmaProofRandomness { + TransferSigmaProofRandomness { + x1s: vector::range(0, 8).map(|_| ristretto255::random_scalar()), + x2: ristretto255::random_scalar(), + x3s: vector::range(0, 4).map(|_| ristretto255::random_scalar()), + x4s: vector::range(0, 4).map(|_| ristretto255::random_scalar()), + x5: ristretto255::random_scalar(), + x6s: vector::range(0, 8).map(|_| ristretto255::random_scalar()), + } + } + + #[test_only] fun generate_normalization_sigma_proof_randomness(): NormalizationSigmaProofRandomness { + NormalizationSigmaProofRandomness { + x1s: vector::range(0, 8).map(|_| ristretto255::random_scalar()), + x2: ristretto255::random_scalar(), + x3: ristretto255::random_scalar(), + x4s: vector::range(0, 8).map(|_| ristretto255::random_scalar()), + } + } + + #[test_only] fun generate_rotation_sigma_proof_randomness(): RotationSigmaProofRandomness { + RotationSigmaProofRandomness { + x1s: vector::range(0, 8).map(|_| ristretto255::random_scalar()), + x2: ristretto255::random_scalar(), + x3: ristretto255::random_scalar(), + x4: ristretto255::random_scalar(), + x5s: vector::range(0, 8).map(|_| ristretto255::random_scalar()), + } + } +} diff --git a/aptos-move/framework/aptos-experimental/sources/confidential_asset/confidential_proof.spec.move b/aptos-move/framework/aptos-experimental/sources/confidential_asset/confidential_proof.spec.move new file mode 100644 index 0000000000000..0c7031bdc332f --- /dev/null +++ b/aptos-move/framework/aptos-experimental/sources/confidential_asset/confidential_proof.spec.move @@ -0,0 +1,2 @@ +spec aptos_experimental::confidential_proof { +} diff --git a/aptos-move/framework/aptos-experimental/sources/confidential_asset/ristretto255_twisted_elgamal.move b/aptos-move/framework/aptos-experimental/sources/confidential_asset/ristretto255_twisted_elgamal.move new file mode 100644 index 0000000000000..8f31e2b68fbbf --- /dev/null +++ b/aptos-move/framework/aptos-experimental/sources/confidential_asset/ristretto255_twisted_elgamal.move @@ -0,0 +1,261 @@ +/// This module implements a Twisted ElGamal encryption API, over the Ristretto255 curve, designed to work with +/// additional cryptographic constructs such as Bulletproofs. +/// +/// A Twisted ElGamal *ciphertext* encrypts a value `v` under a basepoint `G` and a secondary point `H`, +/// alongside a public key `Y = sk^(-1) * H`, where `sk` is the corresponding secret key. The ciphertext is of the form: +/// `(v * G + r * H, r * Y)`, where `r` is a random scalar. +/// +/// The Twisted ElGamal scheme differs from standard ElGamal by introducing a secondary point `H` to enhance +/// flexibility and functionality in cryptographic protocols. This design still maintains the homomorphic property: +/// `Enc_Y(v, r) + Enc_Y(v', r') = Enc_Y(v + v', r + r')`, where `v, v'` are plaintexts, `Y` is the public key, +/// and `r, r'` are random scalars. +module aptos_experimental::ristretto255_twisted_elgamal { + use std::option::Option; + use aptos_std::ristretto255::{Self, CompressedRistretto, RistrettoPoint, Scalar}; + + // + // Structs + // + + /// A Twisted ElGamal ciphertext, consisting of two Ristretto255 points. 
+ struct Ciphertext has drop { + left: RistrettoPoint, // v * G + r * H + right: RistrettoPoint, // r * Y + } + + /// A compressed Twisted ElGamal ciphertext, consisting of two compressed Ristretto255 points. + struct CompressedCiphertext has store, copy, drop { + left: CompressedRistretto, + right: CompressedRistretto, + } + + /// A Twisted ElGamal public key, represented as a compressed Ristretto255 point. + struct CompressedPubkey has store, copy, drop { + point: CompressedRistretto, + } + + // + // Public functions + // + + /// Creates a new public key from a serialized Ristretto255 point. + /// Returns `Some(CompressedPubkey)` if the deserialization is successful, otherwise `None`. + public fun new_pubkey_from_bytes(bytes: vector): Option { + let point = ristretto255::new_compressed_point_from_bytes(bytes); + if (point.is_some()) { + let pk = CompressedPubkey { + point: point.extract() + }; + std::option::some(pk) + } else { + std::option::none() + } + } + + /// Serializes a Twisted ElGamal public key into its byte representation. + public fun pubkey_to_bytes(pubkey: &CompressedPubkey): vector { + ristretto255::compressed_point_to_bytes(pubkey.point) + } + + /// Converts a public key into its corresponding `RistrettoPoint`. + public fun pubkey_to_point(pubkey: &CompressedPubkey): RistrettoPoint { + ristretto255::point_decompress(&pubkey.point) + } + + /// Converts a public key into its corresponding `CompressedRistretto` representation. + public fun pubkey_to_compressed_point(pubkey: &CompressedPubkey): CompressedRistretto { + pubkey.point + } + + /// Creates a new ciphertext from a serialized representation, consisting of two 32-byte Ristretto255 points. + /// Returns `Some(Ciphertext)` if the deserialization succeeds, otherwise `None`. + public fun new_ciphertext_from_bytes(bytes: vector): Option { + if (bytes.length() != 64) { + return std::option::none() + }; + + let bytes_right = bytes.trim(32); + + let left_point = ristretto255::new_point_from_bytes(bytes); + let right_point = ristretto255::new_point_from_bytes(bytes_right); + + if (left_point.is_some() && right_point.is_some()) { + std::option::some(Ciphertext { + left: left_point.extract(), + right: right_point.extract() + }) + } else { + std::option::none() + } + } + + /// Creates a ciphertext `(val * G, 0 * G)` where `val` is the plaintext, and the randomness is set to zero. + public fun new_ciphertext_no_randomness(val: &Scalar): Ciphertext { + Ciphertext { + left: ristretto255::basepoint_mul(val), + right: ristretto255::point_identity(), + } + } + + /// Constructs a Twisted ElGamal ciphertext from two `RistrettoPoint`s. + public fun ciphertext_from_points(left: RistrettoPoint, right: RistrettoPoint): Ciphertext { + Ciphertext { + left, + right, + } + } + + /// Constructs a Twisted ElGamal ciphertext from two compressed Ristretto255 points. + public fun ciphertext_from_compressed_points( + left: CompressedRistretto, + right: CompressedRistretto + ): CompressedCiphertext { + CompressedCiphertext { + left, + right, + } + } + + /// Serializes a Twisted ElGamal ciphertext into its byte representation. + public fun ciphertext_to_bytes(ct: &Ciphertext): vector { + let bytes = ristretto255::point_to_bytes(&ristretto255::point_compress(&ct.left)); + bytes.append(ristretto255::point_to_bytes(&ristretto255::point_compress(&ct.right))); + bytes + } + + /// Converts a ciphertext into a pair of `RistrettoPoint`s. 
+ public fun ciphertext_into_points(c: Ciphertext): (RistrettoPoint, RistrettoPoint) { + let Ciphertext { left, right } = c; + (left, right) + } + + /// Returns the two `RistrettoPoint`s representing the ciphertext. + public fun ciphertext_as_points(c: &Ciphertext): (&RistrettoPoint, &RistrettoPoint) { + (&c.left, &c.right) + } + + /// Compresses a Twisted ElGamal ciphertext into its `CompressedCiphertext` representation. + public fun compress_ciphertext(ct: &Ciphertext): CompressedCiphertext { + CompressedCiphertext { + left: ristretto255::point_compress(&ct.left), + right: ristretto255::point_compress(&ct.right), + } + } + + /// Decompresses a `CompressedCiphertext` back into its `Ciphertext` representation. + public fun decompress_ciphertext(ct: &CompressedCiphertext): Ciphertext { + Ciphertext { + left: ristretto255::point_decompress(&ct.left), + right: ristretto255::point_decompress(&ct.right), + } + } + + /// Adds two ciphertexts homomorphically, producing a new ciphertext representing the sum of the two. + public fun ciphertext_add(lhs: &Ciphertext, rhs: &Ciphertext): Ciphertext { + Ciphertext { + left: ristretto255::point_add(&lhs.left, &rhs.left), + right: ristretto255::point_add(&lhs.right, &rhs.right), + } + } + + /// Adds two ciphertexts homomorphically, updating the first ciphertext in place. + public fun ciphertext_add_assign(lhs: &mut Ciphertext, rhs: &Ciphertext) { + ristretto255::point_add_assign(&mut lhs.left, &rhs.left); + ristretto255::point_add_assign(&mut lhs.right, &rhs.right); + } + + /// Subtracts one ciphertext from another homomorphically, producing a new ciphertext representing the difference. + public fun ciphertext_sub(lhs: &Ciphertext, rhs: &Ciphertext): Ciphertext { + Ciphertext { + left: ristretto255::point_sub(&lhs.left, &rhs.left), + right: ristretto255::point_sub(&lhs.right, &rhs.right), + } + } + + /// Subtracts one ciphertext from another homomorphically, updating the first ciphertext in place. + public fun ciphertext_sub_assign(lhs: &mut Ciphertext, rhs: &Ciphertext) { + ristretto255::point_sub_assign(&mut lhs.left, &rhs.left); + ristretto255::point_sub_assign(&mut lhs.right, &rhs.right); + } + + /// Creates a copy of the provided ciphertext. + public fun ciphertext_clone(c: &Ciphertext): Ciphertext { + Ciphertext { + left: ristretto255::point_clone(&c.left), + right: ristretto255::point_clone(&c.right), + } + } + + /// Compares two ciphertexts for equality, returning `true` if they encrypt the same value and randomness. + public fun ciphertext_equals(lhs: &Ciphertext, rhs: &Ciphertext): bool { + ristretto255::point_equals(&lhs.left, &rhs.left) && + ristretto255::point_equals(&lhs.right, &rhs.right) + } + + /// Returns the `RistrettoPoint` in the ciphertext that contains the encrypted value in the exponent. + public fun get_value_component(ct: &Ciphertext): &RistrettoPoint { + &ct.left + } + + // + // Test-only functions + // + + #[test_only] + /// Derives a public key from a given secret key using the formula `Y = sk^(-1) * H`. + /// Returns `Some(CompressedPubkey)` if the secret key inversion succeeds, otherwise `None`. 
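As a concrete illustration of the additive homomorphism described in the module documentation, a test-only sketch (not part of this change) that adds two ciphertexts and compares the result against a fresh encryption of the sum, using the helpers above and the test-only constructor defined below:

    #[test_only]
    fun example_homomorphism(pubkey: &CompressedPubkey) {
        let r1 = ristretto255::random_scalar();
        let r2 = ristretto255::random_scalar();
        // Enc(2, r1) + Enc(3, r2) should equal Enc(5, r1 + r2).
        let c1 = new_ciphertext_with_basepoint(&ristretto255::new_scalar_from_u64(2), &r1, pubkey);
        let c2 = new_ciphertext_with_basepoint(&ristretto255::new_scalar_from_u64(3), &r2, pubkey);
        let sum = ciphertext_add(&c1, &c2);
        let expected = new_ciphertext_with_basepoint(
            &ristretto255::new_scalar_from_u64(5),
            &ristretto255::scalar_add(&r1, &r2),
            pubkey
        );
        assert!(ciphertext_equals(&sum, &expected), 0);
    }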
+ public fun pubkey_from_secret_key(sk: &Scalar): Option { + let sk_invert = ristretto255::scalar_invert(sk); + + if (sk_invert.is_some()) { + let point = ristretto255::point_mul( + &ristretto255::hash_to_point_base(), + &sk_invert.extract() + ); + + std::option::some(CompressedPubkey { + point: ristretto255::point_compress(&point) + }) + } else { + std::option::none() + } + } + + #[test_only] + /// Constructs a ciphertext `(v * point1 + r * point2, r * pubkey)` where `point1` and `point2` are arbitrary points. + public fun new_ciphertext( + v: &Scalar, + point1: &RistrettoPoint, + r: &Scalar, + point2: &RistrettoPoint, + pubkey: &CompressedPubkey + ): Ciphertext { + Ciphertext { + left: ristretto255::double_scalar_mul(v, point1, r, point2), + right: ristretto255::point_mul(&pubkey_to_point(pubkey), r), + } + } + + #[test_only] + /// Constructs a ciphertext `(v * G + r * H, r * Y)` using the Ristretto255 basepoint `G` and a secondary basepoint `H`. + public fun new_ciphertext_with_basepoint(v: &Scalar, r: &Scalar, pubkey: &CompressedPubkey): Ciphertext { + Ciphertext { + left: ristretto255::double_scalar_mul( + v, + &ristretto255::basepoint(), + r, + &ristretto255::hash_to_point_base() + ), + right: ristretto255::point_mul(&pubkey_to_point(pubkey), r), + } + } + + #[test_only] + /// Generates a random Twisted ElGamal key pair (`sk`, `Y`), where `Y = sk^(-1) * H`. + public fun generate_twisted_elgamal_keypair(): (ristretto255::Scalar, CompressedPubkey) { + let sk = ristretto255::random_scalar(); + let pk = pubkey_from_secret_key(&sk); + + (sk, pk.extract()) + } +} diff --git a/aptos-move/framework/aptos-experimental/sources/confidential_asset/ristretto255_twisted_elgamal.spec.move b/aptos-move/framework/aptos-experimental/sources/confidential_asset/ristretto255_twisted_elgamal.spec.move new file mode 100644 index 0000000000000..10a2fa90ba008 --- /dev/null +++ b/aptos-move/framework/aptos-experimental/sources/confidential_asset/ristretto255_twisted_elgamal.spec.move @@ -0,0 +1,2 @@ +spec aptos_experimental::ristretto255_twisted_elgamal { +} diff --git a/aptos-move/framework/aptos-experimental/sources/large_packages.move b/aptos-move/framework/aptos-experimental/sources/large_packages.move new file mode 100644 index 0000000000000..77a7f80ccb1df --- /dev/null +++ b/aptos-move/framework/aptos-experimental/sources/large_packages.move @@ -0,0 +1,204 @@ +/// # Aptos Large Packages Framework +/// +/// This module provides a framework for uploading large packages to the Aptos network, under standard +/// accounts or objects. +/// To publish using this API, you must divide your metadata and modules across multiple calls +/// into `large_packages::stage_code_chunk`. +/// In each pass, the caller pushes more code by calling `stage_code_chunk`. +/// In the final call, the caller can use `stage_code_chunk_and_publish_to_account`, `stage_code_chunk_and_publish_to_object`, or +/// `stage_code_chunk_and_upgrade_object_code` to upload the final data chunk and publish or upgrade the package on-chain. +/// +/// The above logic is currently implemented in the Python +/// SDK: [`aptos-python-sdk`](https://github.com/aptos-labs/aptos-python-sdk/blob/main/aptos_sdk/package_publisher.py). 
+/// +/// The Aptos CLI supports this as well with the `--chunked-publish` flag: +/// - `aptos move publish [OPTIONS] --chunked-publish` +/// - `aptos move create-object-and-publish-package [OPTIONS] --address-name --chunked-publish` +/// - `aptos move upgrade-object-package [OPTIONS] --address-name --chunked-publish` +/// +/// # Usage +/// +/// 1. **Stage Code Chunks**: +/// - Call `stage_code_chunk` with the appropriate metadata and code chunks. +/// - Ensure that `code_indices` are provided from `0` to `last_module_idx`, without any +/// gaps. +/// +/// +/// 2. **Publish or Upgrade**: +/// - To upload the last data chunk and publish the package, call `stage_code_chunk_and_publish_to_account` or `stage_code_chunk_and_publish_to_object`. +/// +/// - For object code upgrades, call `stage_code_chunk_and_upgrade_object_code` with the argument `code_object` provided. +/// +/// 3. **Cleanup**: +/// - To remove the `StagingArea` resource from an account, call `cleanup_staging_area`. +/// +/// # Notes +/// +/// * Make sure LargePackages is deployed to your network of choice; you can currently find it on both +/// mainnet and testnet at `0xa29df848eebfe5d981f708c2a5b06d31af2be53bbd8ddc94c8523f4b903f7adb`, and +/// in 0x7 (aptos-experimental) on devnet/localnet. +/// * Ensure that `code_indices` have no gaps. For example, if code_indices are +/// provided as [0, 1, 3] (skipping index 2), the inline function `assemble_module_code` will abort, +/// since `StagingArea.last_module_idx` is set to the maximum index provided in +/// `code_indices`, and `assemble_module_code` will look up the `StagingArea.code` SmartTable from +/// 0 to `StagingArea.last_module_idx` in turn. +module aptos_experimental::large_packages { + use std::error; + use std::signer; + use std::vector; + use aptos_std::smart_table::{Self, SmartTable}; + + use aptos_framework::code::{Self, PackageRegistry}; + use aptos_framework::object::{Object}; + use aptos_framework::object_code_deployment; + + /// code_indices and code_chunks should be the same length. + const ECODE_MISMATCH: u64 = 1; + /// Object reference should be provided when upgrading object code.
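As a concrete illustration of the staging flow described above, a sketch (not part of this change) of publishing a two-module package to an account across two transactions, where `owner`, `metadata`, `module_0` and `module_1` are hypothetical values produced by the caller and its build tooling:

    // Transaction 1: stage the package metadata and module 0.
    large_packages::stage_code_chunk(owner, metadata, vector[0], vector[module_0]);
    // Transaction 2: stage module 1 and publish everything to the owner's account.
    large_packages::stage_code_chunk_and_publish_to_account(owner, vector[], vector[1], vector[module_1]);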
+ const EMISSING_OBJECT_REFERENCE: u64 = 2; + + struct StagingArea has key { + metadata_serialized: vector, + code: SmartTable>, + last_module_idx: u64, + } + + public entry fun stage_code_chunk( + owner: &signer, + metadata_chunk: vector, + code_indices: vector, + code_chunks: vector>, + ) acquires StagingArea { + stage_code_chunk_internal(owner, metadata_chunk, code_indices, code_chunks); + } + + public entry fun stage_code_chunk_and_publish_to_account( + owner: &signer, + metadata_chunk: vector, + code_indices: vector, + code_chunks: vector>, + ) acquires StagingArea { + let staging_area = stage_code_chunk_internal(owner, metadata_chunk, code_indices, code_chunks); + publish_to_account(owner, staging_area); + cleanup_staging_area(owner); + } + + public entry fun stage_code_chunk_and_publish_to_object( + owner: &signer, + metadata_chunk: vector, + code_indices: vector, + code_chunks: vector>, + ) acquires StagingArea { + let staging_area = stage_code_chunk_internal(owner, metadata_chunk, code_indices, code_chunks); + publish_to_object(owner, staging_area); + cleanup_staging_area(owner); + } + + public entry fun stage_code_chunk_and_upgrade_object_code( + owner: &signer, + metadata_chunk: vector, + code_indices: vector, + code_chunks: vector>, + code_object: Object, + ) acquires StagingArea { + let staging_area = stage_code_chunk_internal(owner, metadata_chunk, code_indices, code_chunks); + upgrade_object_code(owner, staging_area, code_object); + cleanup_staging_area(owner); + } + + inline fun stage_code_chunk_internal( + owner: &signer, + metadata_chunk: vector, + code_indices: vector, + code_chunks: vector>, + ): &mut StagingArea acquires StagingArea { + assert!( + vector::length(&code_indices) == vector::length(&code_chunks), + error::invalid_argument(ECODE_MISMATCH), + ); + + let owner_address = signer::address_of(owner); + + if (!exists(owner_address)) { + move_to(owner, StagingArea { + metadata_serialized: vector[], + code: smart_table::new(), + last_module_idx: 0, + }); + }; + + let staging_area = borrow_global_mut(owner_address); + + if (!vector::is_empty(&metadata_chunk)) { + vector::append(&mut staging_area.metadata_serialized, metadata_chunk); + }; + + let i = 0; + while (i < vector::length(&code_chunks)) { + let inner_code = *vector::borrow(&code_chunks, i); + let idx = (*vector::borrow(&code_indices, i) as u64); + + if (smart_table::contains(&staging_area.code, idx)) { + vector::append(smart_table::borrow_mut(&mut staging_area.code, idx), inner_code); + } else { + smart_table::add(&mut staging_area.code, idx, inner_code); + if (idx > staging_area.last_module_idx) { + staging_area.last_module_idx = idx; + } + }; + i = i + 1; + }; + + staging_area + } + + inline fun publish_to_account( + publisher: &signer, + staging_area: &mut StagingArea, + ) { + let code = assemble_module_code(staging_area); + code::publish_package_txn(publisher, staging_area.metadata_serialized, code); + } + + inline fun publish_to_object( + publisher: &signer, + staging_area: &mut StagingArea, + ) { + let code = assemble_module_code(staging_area); + object_code_deployment::publish(publisher, staging_area.metadata_serialized, code); + } + + inline fun upgrade_object_code( + publisher: &signer, + staging_area: &mut StagingArea, + code_object: Object, + ) { + let code = assemble_module_code(staging_area); + object_code_deployment::upgrade(publisher, staging_area.metadata_serialized, code, code_object); + } + + inline fun assemble_module_code( + staging_area: &mut StagingArea, + ): vector> { + let 
last_module_idx = staging_area.last_module_idx; + let code = vector[]; + let i = 0; + while (i <= last_module_idx) { + vector::push_back( + &mut code, + *smart_table::borrow(&staging_area.code, i) + ); + i = i + 1; + }; + code + } + + public entry fun cleanup_staging_area(owner: &signer) acquires StagingArea { + let StagingArea { + metadata_serialized: _, + code, + last_module_idx: _, + } = move_from(signer::address_of(owner)); + smart_table::destroy(code); + } +} diff --git a/aptos-move/framework/aptos-experimental/sources/test_derivable_account_abstraction_ed25519_hex.move b/aptos-move/framework/aptos-experimental/sources/test_derivable_account_abstraction_ed25519_hex.move new file mode 100644 index 0000000000000..a61a7db623c94 --- /dev/null +++ b/aptos-move/framework/aptos-experimental/sources/test_derivable_account_abstraction_ed25519_hex.move @@ -0,0 +1,36 @@ +/// Domain account abstraction using ed25519 hex for signing. +/// +/// Authentication takes digest, converts to hex (prefixed with 0x, with lowercase letters), +/// and then expects that to be signed. +/// authenticator is expected to be signature: vector +/// account_identity is raw public_key. +module aptos_experimental::test_derivable_account_abstraction_ed25519_hex { + use std::error; + use aptos_std::string_utils; + use aptos_std::ed25519::{ + Self, + new_signature_from_bytes, + new_unvalidated_public_key_from_bytes, + }; + use aptos_framework::auth_data::AbstractionAuthData; + + const EINVALID_SIGNATURE: u64 = 1; + + /// Authorization function for domain account abstraction. + public fun authenticate(account: signer, aa_auth_data: AbstractionAuthData): signer { + let hex_digest = string_utils::to_string(aa_auth_data.digest()); + + let public_key = new_unvalidated_public_key_from_bytes(*aa_auth_data.derivable_abstract_public_key()); + let signature = new_signature_from_bytes(*aa_auth_data.derivable_abstract_signature()); + assert!( + ed25519::signature_verify_strict( + &signature, + &public_key, + *hex_digest.bytes(), + ), + error::permission_denied(EINVALID_SIGNATURE) + ); + + account + } +} diff --git a/aptos-move/framework/aptos-experimental/sources/test_function_values.move b/aptos-move/framework/aptos-experimental/sources/test_function_values.move new file mode 100644 index 0000000000000..a84d50b441677 --- /dev/null +++ b/aptos-move/framework/aptos-experimental/sources/test_function_values.move @@ -0,0 +1,9 @@ +module aptos_experimental::test_function_values { + struct Funcs { + f: |u64| u64 has drop + copy, + } + + fun transfer_and_create_account(some_f: |u64|u64): u64 { + some_f(3) + } +} diff --git a/aptos-move/framework/aptos-experimental/sources/trading/market/market.move b/aptos-move/framework/aptos-experimental/sources/trading/market/market.move new file mode 100644 index 0000000000000..0771349adb5f7 --- /dev/null +++ b/aptos-move/framework/aptos-experimental/sources/trading/market/market.move @@ -0,0 +1,1032 @@ +/// This module provides a generic trading engine implementation for a market. On a high level, its a data structure, +/// that stores an order book and provides APIs to place orders, cancel orders, and match orders. The market also acts +/// as a wrapper around the order book and pluggable clearinghouse implementation. +/// A clearing house implementation is expected to implement the following APIs +/// - settle_trade(taker, maker, taker_order_id, maker_order_id, fill_id, is_taker_long, price, size): SettleTradeResult -> +/// Called by the market when there is an match between taker and maker. 
The clearinghouse is expected to settle the trade +/// and return the result. Please note that the clearinghouse settlement size might not be the same as the order match size and +/// the settlement might also fail. The fill_id is an incremental counter for matched orders and can be used to track specific fills. +/// - validate_order_placement(account, is_taker, is_long, price, size): bool -> Called by the market to validate +/// an order when it is placed. The clearinghouse is expected to validate the order and return true if the order is valid. +/// Check out clearinghouse_test as an example of the simplest form of clearinghouse implementation that just tracks +/// the position size of the user and does not do any validation. +/// +/// - place_maker_order(account, order_id, is_bid, price, size, metadata) -> Called by the market before placing the +/// maker order in the order book. The clearinghouse can use this to track pending orders in the order book and perform +/// any other bookkeeping operations. +/// +/// - cleanup_order(account, order_id, is_bid, remaining_size) -> Called by the market when an order is cancelled or fully filled. +/// The clearinghouse can perform any cleanup operations like removing the order from the pending orders list. For every order placement +/// that passes the validate_order_placement check, +/// the market guarantees that the cleanup_order API will be called once and only once with the remaining size of the order. +/// +/// - decrease_order_size(account, order_id, is_bid, price, size) -> Called by the market when a maker order is decreased +/// in size by the user. Please note that this API will only be called after place_maker_order is called and the order is +/// already in the order book. Size in this case is the remaining size of the order after the decrease. +/// +/// The following are some valid sequences of API calls that the market makes to the clearinghouse: +/// 1. validate_order_placement(10) +/// 2. settle_trade(2) +/// 3. settle_trade(3) +/// 4. place_maker_order(5) +/// 5. decrease_order_size(2) +/// 6. decrease_order_size(1) +/// 7. cleanup_order(2) +/// or +/// 1. validate_order_placement(10) +/// 2. cleanup_order(10) +/// +/// Upon placement of an order, the market generates an order id and emits an event with the order details - the order id +/// is a unique id for the order that can be used to later get the status of the order or cancel the order. +/// +/// The market also supports various conditions for order matching like Good Till Cancelled (GTC), Post Only, Immediate or Cancel (IOC). +/// GTC orders are orders that are valid until they are cancelled or filled. Post Only orders are orders that are valid only if they are not +/// taker orders. IOC orders are orders that are valid only if they are taker orders. +/// +/// In addition, the market also supports trigger conditions for orders. An order with a trigger condition is not put +/// on the order book until its trigger conditions are met. The following trigger conditions are supported: +/// TakeProfit(price): If it is a buy order, it is triggered when the market price is greater than or equal to the price. If +/// it is a sell order, it is triggered when the market price is less than or equal to the price. +/// StopLoss(price): If it is a buy order, it is triggered when the market price is less than or equal to the price. If it +/// is a sell order, it is triggered when the market price is greater than or equal to the price.
+/// TimeBased(time): The order is triggered when the current time is greater than or equal to the time. +/// +module aptos_experimental::market { + + use std::option; + use std::option::Option; + use std::signer; + use std::string::String; + use std::vector; + use aptos_framework::event; + use aptos_experimental::order_book::{OrderBook, new_order_book, new_order_request}; + use aptos_experimental::order_book_types::{TriggerCondition, Order}; + use aptos_experimental::market_types::MarketClearinghouseCallbacks; + + // Error codes + const EINVALID_ORDER: u64 = 1; + const EORDER_BOOK_FULL: u64 = 2; + const EMARKET_NOT_FOUND: u64 = 3; + const ENOT_ADMIN: u64 = 4; + const EINVALID_FEE_TIER: u64 = 5; + const EORDER_DOES_NOT_EXIST: u64 = 6; + const EINVALID_TIME_IN_FORCE_FOR_MAKER: u64 = 7; + const EINVALID_TIME_IN_FORCE_FOR_TAKER: u64 = 8; + const EINVALID_MATCHING_FOR_MAKER_REINSERT: u64 = 9; + const EINVALID_TAKER_POSITION_UPDATE: u64 = 10; + const EINVALID_LIQUIDATION: u64 = 11; + + /// Order time in force + /// Good till cancelled order type + const TIME_IN_FORCE_GTC: u8 = 0; + /// Post Only order type - ensures that the order is not a taker order + const TIME_IN_FORCE_POST_ONLY: u8 = 1; + /// Immediate or Cancel order type - ensures that the order is a taker order. Try to match as much of the + /// order as possible as a taker order and cancel the rest. + const TIME_IN_FORCE_IOC: u8 = 2; + + public fun good_till_cancelled(): u8 { + TIME_IN_FORCE_GTC + } + + public fun post_only(): u8 { + TIME_IN_FORCE_POST_ONLY + } + + public fun immediate_or_cancel(): u8 { + TIME_IN_FORCE_IOC + } + + struct Market has store { + /// Address of the parent object that created this market + /// Purely for grouping events based on the source DEX, not used otherwise + parent: address, + /// Address of the market object of this market. + market: address, + // TODO: remove sequential order id generation + last_order_id: u64, + // Incremental fill id for matched orders + next_fill_id: u64, + config: MarketConfig, + order_book: OrderBook + } + + struct MarketConfig has store { + /// Whether to allow self-matching orders + allow_self_trade: bool, + /// Whether to allow sending all events for the market + allow_events_emission: bool + } + + /// Order has been accepted by the engine. + const ORDER_STATUS_OPEN: u8 = 0; + /// Order has been fully or partially filled. + const ORDER_STATUS_FILLED: u8 = 1; + /// Order has been cancelled by the user or engine. + const ORDER_STATUS_CANCELLED: u8 = 2; + /// Order has been rejected by the engine. Unlike cancelled orders, rejected + /// orders are invalid orders. Rejection reasons: + /// 1. Insufficient margin + /// 2.
Order is reduce_only but does not reduce + const ORDER_STATUS_REJECTED: u8 = 3; + const ORDER_SIZE_REDUCED: u8 = 4; + + public fun order_status_open(): u8 { + ORDER_STATUS_OPEN + } + + public fun order_status_filled(): u8 { + ORDER_STATUS_FILLED + } + + public fun order_status_cancelled(): u8 { + ORDER_STATUS_CANCELLED + } + + public fun order_status_rejected(): u8 { + ORDER_STATUS_REJECTED + } + + #[event] + struct OrderEvent has drop, copy, store { + parent: address, + market: address, + order_id: u64, + user: address, + /// Original size of the order + orig_size: u64, + /// Remaining size of the order in the order book + remaining_size: u64, + // TODO(bl): Brian and Sean will revisit to see if we should have split + // into multiple events for OrderEvent + /// OPEN - size_delta will be amount of size added + /// CANCELLED - size_delta will be amount of size removed + /// FILLED - size_delta will be amount of size filled + /// REJECTED - size_delta will always be 0 + size_delta: u64, + price: u64, + is_buy: bool, + /// Whether the order crosses the orderbook. + is_taker: bool, + status: u8, + details: std::string::String + } + + enum OrderCancellationReason has drop, copy { + PostOnlyViolation, + IOCViolation, + PositionUpdateViolation, + ReduceOnlyViolation, + ClearinghouseSettleViolation, + MaxFillLimitViolation + } + + struct OrderMatchResult has drop { + order_id: u64, + remaining_size: u64, + cancel_reason: Option, + fill_sizes: vector + } + + public fun destroy_order_match_result( + self: OrderMatchResult + ): (u64, u64, Option, vector) { + let OrderMatchResult { order_id, remaining_size, cancel_reason, fill_sizes } = + self; + (order_id, remaining_size, cancel_reason, fill_sizes) + } + + public fun number_of_fills(self: &OrderMatchResult): u64 { + self.fill_sizes.length() + } + + public fun total_fill_size(self: &OrderMatchResult): u64 { + self.fill_sizes.fold(0, |acc, fill_size| acc + fill_size) + } + + public fun get_cancel_reason(self: &OrderMatchResult): Option { + self.cancel_reason + } + + public fun get_remaining_size_from_result(self: &OrderMatchResult): u64 { + self.remaining_size + } + + public fun is_ioc_violation(self: OrderCancellationReason): bool { + return self == OrderCancellationReason::IOCViolation + } + + public fun is_fill_limit_violation( + cancel_reason: OrderCancellationReason + ): bool { + return cancel_reason == OrderCancellationReason::MaxFillLimitViolation + } + + public fun get_order_id(self: OrderMatchResult): u64 { + self.order_id + } + + public fun new_market_config( + allow_self_matching: bool, allow_events_emission: bool + ): MarketConfig { + MarketConfig { allow_self_trade: allow_self_matching, allow_events_emission: allow_events_emission } + } + + public fun new_market( + parent: &signer, market: &signer, config: MarketConfig + ): Market { + // requiring signers, and not addresses, purely to guarantee different dexes + // cannot polute events to each other, accidentally or maliciously. 
+ Market { + parent: signer::address_of(parent), + market: signer::address_of(market), + last_order_id: 0, + next_fill_id: 0, + config, + order_book: new_order_book() + } + } + + public fun get_market(self: &Market): address { + self.market + } + + public fun get_order_book(self: &Market): &OrderBook { + &self.order_book + } + + public fun get_order_book_mut( + self: &mut Market + ): &mut OrderBook { + &mut self.order_book + } + + public fun best_bid_price(self: &Market): Option { + self.order_book.best_bid_price() + } + + public fun best_ask_price(self: &Market): Option { + self.order_book.best_ask_price() + } + + public fun is_taker_order( + self: &Market, + price: u64, + is_buy: bool, + trigger_condition: Option + ): bool { + self.order_book.is_taker_order(price, is_buy, trigger_condition) + } + + /// Places an order - if it is a taker order, it will be matched immediately, and if it is a maker order, it will simply + /// be placed in the order book. An order id is generated when the order is placed and this id can be used to + /// uniquely identify the order for this market and can also be used to get the status of the order or cancel the order. + /// The order is placed with the following parameters: + /// - user: The user who is placing the order + /// - price: The price at which the order is placed + /// - orig_size: The original size of the order + /// - is_buy: Whether the order is a buy order or a sell order + /// - time_in_force: The time in force for the order. This can be one of the following: + /// - TIME_IN_FORCE_GTC: Good till cancelled order type + /// - TIME_IN_FORCE_POST_ONLY: Post Only order type - ensures that the order is not a taker order + /// - TIME_IN_FORCE_IOC: Immediate or Cancel order type - ensures that the order is a taker order. Try to match as much of the + /// order as possible as a taker order and cancel the rest. + /// - trigger_condition: The trigger condition for the order + /// - metadata: The metadata for the order. This can be any type that the clearing house implementation supports. + /// - max_fill_limit: The maximum fill limit for the order. This is the maximum number of fills to trigger for this order. + /// This knob is present to configure the maximum amount of gas any order placement transaction might consume and avoid + /// hitting the maximum gas limit of the blockchain. + /// - emit_cancel_on_fill_limit: Whether to emit an order cancellation event when the fill limit is reached. + /// This is useful as the caller might not want to cancel the order when the limit is reached and can continue + /// that order in a separate transaction. + /// - callbacks: The callbacks for the market clearinghouse. This is a struct that implements the MarketClearinghouseCallbacks + /// interface. This is used to validate the order and settle the trade. + /// Returns the order id, remaining size, cancel reason and number of fills for the order.
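A sketch of a typical call to the function defined below (illustrative only, not part of this change), assuming `market`, `user`, `metadata` and `callbacks` come from hypothetical test helpers:

    let result = market.place_order(
        user,
        100,                    // price
        10,                     // orig_size
        true,                   // is_bid
        good_till_cancelled(),  // time_in_force
        option::none(),         // no trigger condition
        metadata,
        1000,                   // max_fill_limit
        true,                   // emit_cancel_on_fill_limit
        &callbacks
    );
    let (order_id, remaining_size, cancel_reason, fill_sizes) = result.destroy_order_match_result();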
+ public fun place_order( + self: &mut Market, + user: &signer, + price: u64, + orig_size: u64, + is_bid: bool, + time_in_force: u8, + trigger_condition: Option, + metadata: M, + max_fill_limit: u64, + emit_cancel_on_fill_limit: bool, + callbacks: &MarketClearinghouseCallbacks + ): OrderMatchResult { + let order_id = self.next_order_id(); + self.place_order_with_order_id( + signer::address_of(user), + price, + orig_size, + orig_size, + is_bid, + time_in_force, + trigger_condition, + metadata, + order_id, + max_fill_limit, + emit_cancel_on_fill_limit, + true, + callbacks + ) + } + + public fun next_order_id(self: &mut Market): u64 { + self.last_order_id += 1; + self.last_order_id + } + + fun next_fill_id(self: &mut Market): u64 { + let next_fill_id = self.next_fill_id; + self.next_fill_id += 1; + next_fill_id + } + + fun emit_event_for_order( + self: &Market, + order_id: u64, + user: address, + orig_size: u64, + remaining_size: u64, + size_delta: u64, + price: u64, + is_bid: bool, + is_taker: bool, + status: u8, + details: &String + ) { + // Final check whether event sending is enabled + if (self.config.allow_events_emission) { + event::emit( + OrderEvent { + parent: self.parent, + market: self.market, + order_id, + user, + orig_size, + remaining_size, + size_delta, + price, + is_buy: is_bid, + is_taker, + status, + details: *details + } + ); + }; + } + + /// Similar to `place_order` API but instead of a signer, it takes a user address - can be used in case trading + /// functionality is delegated to a different address. Please note that it is the responsibility of the caller + /// to verify that the transaction signer is authorized to place orders on behalf of the user. + public fun place_order_with_user_addr( + self: &mut Market, + user_addr: address, + price: u64, + orig_size: u64, + is_bid: bool, + time_in_force: u8, + trigger_condition: Option, + metadata: M, + max_fill_limit: u64, + emit_cancel_on_fill_limit: bool, + callbacks: &MarketClearinghouseCallbacks + ): OrderMatchResult { + let order_id = self.next_order_id(); + self.place_order_with_order_id( + user_addr, + price, + orig_size, + orig_size, + is_bid, + time_in_force, + trigger_condition, + metadata, + order_id, + max_fill_limit, + emit_cancel_on_fill_limit, + true, + callbacks + ) + } + + fun place_maker_order_internal( + self: &mut Market, + user_addr: address, + price: u64, + orig_size: u64, + remaining_size: u64, + fill_sizes: vector, + is_bid: bool, + time_in_force: u8, + trigger_condition: Option, + metadata: M, + order_id: u64, + emit_order_open: bool, + callbacks: &MarketClearinghouseCallbacks + ): OrderMatchResult { + // Validate that the order is valid from position management perspective + if (time_in_force == TIME_IN_FORCE_IOC) { + return self.cancel_order_internal( + user_addr, + price, + order_id, + orig_size, + remaining_size, + fill_sizes, + is_bid, + false, // is_taker + OrderCancellationReason::IOCViolation, + std::string::utf8(b"IOC Violation"), + callbacks + ); + }; + + if (emit_order_open) { + emit_event_for_order( + self, + order_id, + user_addr, + orig_size, + remaining_size, + orig_size, + price, + is_bid, + false, // is_taker + ORDER_STATUS_OPEN, + &std::string::utf8(b"") + ); + }; + + callbacks.place_maker_order( + user_addr, order_id, is_bid, price, remaining_size, metadata + ); + self.order_book.place_maker_order( + new_order_request( + user_addr, + order_id, + option::none(), + price, + orig_size, + remaining_size, + is_bid, + trigger_condition, + metadata + ) + ); + return OrderMatchResult { + 
order_id,
+            remaining_size,
+            cancel_reason: option::none(),
+            fill_sizes
+        }
+    }
+
+    fun cancel_maker_order_internal(
+        self: &mut Market,
+        maker_order: &Order,
+        order_id: u64,
+        maker_address: address,
+        maker_cancellation_reason: String,
+        unsettled_size: u64,
+        callbacks: &MarketClearinghouseCallbacks
+    ) {
+        let maker_cancel_size = unsettled_size + maker_order.get_remaining_size();
+
+        emit_event_for_order(
+            self,
+            order_id,
+            maker_address,
+            maker_order.get_orig_size(),
+            0,
+            maker_cancel_size,
+            maker_order.get_price(),
+            maker_order.is_bid(),
+            false,
+            ORDER_STATUS_CANCELLED,
+            &maker_cancellation_reason
+        );
+        // If the maker is invalid, cancel the maker order and continue to the next maker order
+        if (maker_order.get_remaining_size() != 0) {
+            self.order_book.cancel_order(maker_address, order_id);
+        };
+        callbacks.cleanup_order(
+            maker_address, order_id, maker_order.is_bid(), maker_cancel_size
+        );
+    }
+
+    fun cancel_order_internal(
+        self: &mut Market,
+        user_addr: address,
+        price: u64,
+        order_id: u64,
+        orig_size: u64,
+        size_delta: u64,
+        fill_sizes: vector,
+        is_bid: bool,
+        is_taker: bool,
+        cancel_reason: OrderCancellationReason,
+        cancel_details: String,
+        callbacks: &MarketClearinghouseCallbacks
+    ): OrderMatchResult {
+        emit_event_for_order(
+            self,
+            order_id,
+            user_addr,
+            orig_size,
+            0, // remaining size
+            size_delta,
+            price,
+            is_bid,
+            is_taker,
+            ORDER_STATUS_CANCELLED,
+            &cancel_details
+        );
+        callbacks.cleanup_order(
+            user_addr, order_id, is_bid, size_delta
+        );
+        return OrderMatchResult {
+            order_id,
+            remaining_size: 0,
+            cancel_reason: option::some(cancel_reason),
+            fill_sizes
+        }
+    }
+
+    /// Similar to the `place_order` API but allows a few extra parameters, as follows:
+    /// - order_id: The order id for the order - this is needed because for orders with trigger conditions, the order
+    ///   id is generated when the order is placed, and when they are triggered the same order id is used to match the order.
+    /// - emit_taker_order_open: Whether to emit an order open event for the taker order - this is used when
+    ///   the caller does not want to emit an open order event for the taker, e.g. when the taker order was interrupted by a
+    ///   fill limit violation in the previous transaction and this order is just a continuation of the previous order.
+    public fun place_order_with_order_id(
+        self: &mut Market,
+        user_addr: address,
+        price: u64,
+        orig_size: u64,
+        remaining_size: u64,
+        is_bid: bool,
+        time_in_force: u8,
+        trigger_condition: Option,
+        metadata: M,
+        order_id: u64,
+        max_fill_limit: u64,
+        cancel_on_fill_limit: bool,
+        emit_taker_order_open: bool,
+        callbacks: &MarketClearinghouseCallbacks
+    ): OrderMatchResult {
+        assert!(
+            orig_size > 0 && remaining_size > 0,
+            EINVALID_ORDER
+        );
+        // TODO(skedia) the is_taker_order API can actually return a false positive as the maker orders might not be valid.
+        // Changes are needed to ensure the maker order is valid for this order to be a valid taker order.
+        // TODO(skedia) reconcile the semantics around global order id vs account local id.
+ if ( + !callbacks.validate_order_placement( + user_addr, + order_id, + true, // is_taker + is_bid, + price, + remaining_size, + metadata + )) { + return self.cancel_order_internal( + user_addr, + price, + order_id, + orig_size, + 0, // 0 because order was never placed + vector[], + is_bid, + true, // is_taker + OrderCancellationReason::PositionUpdateViolation, + std::string::utf8(b"Position Update violation"), + callbacks + ); + }; + + let is_taker_order = + self.order_book.is_taker_order(price, is_bid, trigger_condition); + if (emit_taker_order_open) { + emit_event_for_order( + self, + order_id, + user_addr, + orig_size, + remaining_size, + orig_size, + price, + is_bid, + is_taker_order, + ORDER_STATUS_OPEN, + &std::string::utf8(b"") + ); + }; + if (!is_taker_order) { + return self.place_maker_order_internal( + user_addr, + price, + orig_size, + remaining_size, + vector[], + is_bid, + time_in_force, + trigger_condition, + metadata, + order_id, + false, + callbacks + ); + }; + + // NOTE: We should always use is_taker: true for this order past this + // point so that indexer can consistently track the order's status + if (time_in_force == TIME_IN_FORCE_POST_ONLY) { + return self.cancel_order_internal( + user_addr, + price, + order_id, + orig_size, + remaining_size, + vector[], + is_bid, + true, // is_taker + OrderCancellationReason::PostOnlyViolation, + std::string::utf8(b"Post Only violation"), + callbacks + ); + }; + let fill_sizes = vector::empty(); + loop { + let result = + self.order_book.get_single_match_for_taker(price, remaining_size, is_bid); + let (maker_order, maker_matched_size) = result.destroy_single_order_match(); + let (maker_address, maker_order_id) = + maker_order.get_order_id().destroy_order_id_type(); + if (!self.config.allow_self_trade && maker_address == user_addr) { + self.cancel_maker_order_internal( + &maker_order, + maker_order_id, + maker_address, + std::string::utf8(b"Disallowed self trading"), + maker_matched_size, + callbacks + ); + continue; + }; + + let fill_id = self.next_fill_id(); + + let settle_result = + callbacks.settle_trade( + user_addr, + maker_address, + order_id, + maker_order_id, + fill_id, + is_bid, + maker_order.get_price(), // Order is always matched at the price of the maker + maker_matched_size, + metadata, + maker_order.get_metadata_from_order() + ); + + let unsettled_maker_size = maker_matched_size; + let settled_size = settle_result.get_settled_size(); + if (settled_size > 0) { + remaining_size -= settled_size; + unsettled_maker_size -= settled_size; + fill_sizes.push_back(settled_size); + // Event for taker fill + emit_event_for_order( + self, + order_id, + user_addr, + orig_size, + remaining_size, + settled_size, + maker_order.get_price(), + is_bid, + true, // is_taker + ORDER_STATUS_FILLED, + &std::string::utf8(b"") + ); + // Event for maker fill + emit_event_for_order( + self, + maker_order_id, + maker_address, + maker_order.get_orig_size(), + maker_order.get_remaining_size() + unsettled_maker_size, + settled_size, + maker_order.get_price(), + !is_bid, + false, // is_taker + ORDER_STATUS_FILLED, + &std::string::utf8(b"") + ); + }; + + let maker_cancellation_reason = settle_result.get_maker_cancellation_reason(); + if (maker_cancellation_reason.is_some()) { + self.cancel_maker_order_internal( + &maker_order, + maker_order_id, + maker_address, + maker_cancellation_reason.destroy_some(), + unsettled_maker_size, + callbacks + ); + }; + + let taker_cancellation_reason = settle_result.get_taker_cancellation_reason(); + if 
(taker_cancellation_reason.is_some()) { + let result = + self.cancel_order_internal( + user_addr, + price, + order_id, + orig_size, + remaining_size, + fill_sizes, + is_bid, + true, // is_taker + OrderCancellationReason::ClearinghouseSettleViolation, + taker_cancellation_reason.destroy_some(), + callbacks + ); + if (maker_cancellation_reason.is_none() && unsettled_maker_size > 0) { + // If the taker is cancelled but the maker is not cancelled, then we need to re-insert + // the maker order back into the order book + self.order_book.reinsert_maker_order( + new_order_request( + maker_address, + maker_order_id, + option::some(maker_order.get_unique_priority_idx()), + maker_order.get_price(), + maker_order.get_orig_size(), + unsettled_maker_size, + !is_bid, + option::none(), + maker_order.get_metadata_from_order() + ) + ); + }; + return result; + }; + + if (maker_order.get_remaining_size() == 0) { + callbacks.cleanup_order( + maker_address, + maker_order_id, + !is_bid, // is_bid is inverted for maker orders + 0 // 0 because the order is fully filled + ); + }; + if (remaining_size == 0) { + callbacks.cleanup_order( + user_addr, order_id, is_bid, 0 // 0 because the order is fully filled + ); + break; + }; + + // Check if the next iteration will still match + let is_taker_order = + self.order_book.is_taker_order(price, is_bid, option::none()); + if (!is_taker_order) { + if (time_in_force == TIME_IN_FORCE_IOC) { + return self.cancel_order_internal( + user_addr, + price, + order_id, + orig_size, + remaining_size, + fill_sizes, + is_bid, + true, // is_taker + OrderCancellationReason::IOCViolation, + std::string::utf8(b"IOC_VIOLATION"), + callbacks + ); + } else { + // If the order is not a taker order, then we can place it as a maker order + return self.place_maker_order_internal( + user_addr, + price, + orig_size, + remaining_size, + fill_sizes, + is_bid, + time_in_force, + trigger_condition, + metadata, + order_id, + true, // emit_order_open + callbacks + ); + }; + }; + + if (fill_sizes.length() >= max_fill_limit) { + if (cancel_on_fill_limit) { + return self.cancel_order_internal( + user_addr, + price, + order_id, + orig_size, + remaining_size, + fill_sizes, + is_bid, + true, // is_taker + OrderCancellationReason::MaxFillLimitViolation, + std::string::utf8(b"Max fill limit reached"), + callbacks + ); + } else { + return OrderMatchResult { + order_id, + remaining_size, + cancel_reason: option::some( + OrderCancellationReason::MaxFillLimitViolation + ), + fill_sizes + } + }; + }; + }; + OrderMatchResult { + order_id, + remaining_size, + cancel_reason: option::none(), + fill_sizes + } + } + + /// Cancels an order - this will cancel the order and emit an event for the order cancellation. 
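+    /// Illustrative usage (the `order_id` and `callbacks` values are assumed for the example): given the order id
+    /// returned from a previous `place_order` result,
+    ///     market.cancel_order(user, order_id, &callbacks);
+    /// If the order is still present in the book, its remaining size is passed to the clearinghouse via
+    /// `cleanup_order` and a cancellation event is emitted.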
+    public fun cancel_order(
+        self: &mut Market,
+        user: &signer,
+        order_id: u64,
+        callbacks: &MarketClearinghouseCallbacks
+    ) {
+        let account = signer::address_of(user);
+        let maybe_order = self.order_book.cancel_order(account, order_id);
+        if (maybe_order.is_some()) {
+            let order = maybe_order.destroy_some();
+            let (
+                order_id_type,
+                _unique_priority_idx,
+                price,
+                orig_size,
+                remaining_size,
+                is_bid,
+                _trigger_condition,
+                _metadata
+            ) = order.destroy_order();
+            callbacks.cleanup_order(
+                account, order_id, is_bid, remaining_size
+            );
+            let (user, order_id) = order_id_type.destroy_order_id_type();
+            emit_event_for_order(
+                self,
+                order_id,
+                user,
+                orig_size,
+                remaining_size,
+                remaining_size,
+                price,
+                is_bid,
+                false, // is_taker
+                ORDER_STATUS_CANCELLED,
+                &std::string::utf8(b"Order cancelled")
+            );
+        }
+    }
+
+    /// Reduces the size of an order - this will decrease the remaining size of the order and emit an event for the size reduction.
+    public fun decrease_order_size(
+        self: &mut Market,
+        user: &signer,
+        order_id: u64,
+        size_delta: u64,
+        callbacks: &MarketClearinghouseCallbacks
+    ) {
+        let account = signer::address_of(user);
+        self.order_book.decrease_order_size(account, order_id, size_delta);
+        let maybe_order = self.order_book.get_order(account, order_id);
+        assert!(maybe_order.is_some(), EORDER_DOES_NOT_EXIST);
+        let (order, _) = maybe_order.destroy_some().destroy_order_from_state();
+        let (
+            order_id_type,
+            _unique_priority_idx,
+            price,
+            orig_size,
+            remaining_size,
+            is_bid,
+            _trigger_condition,
+            _metadata
+        ) = order.destroy_order();
+        let (user, order_id) = order_id_type.destroy_order_id_type();
+        callbacks.decrease_order_size(
+            user, order_id, is_bid, price, remaining_size
+        );
+
+        emit_event_for_order(
+            self,
+            order_id,
+            user,
+            orig_size,
+            remaining_size,
+            size_delta,
+            price,
+            is_bid,
+            false, // is_taker
+            ORDER_SIZE_REDUCED,
+            &std::string::utf8(b"Order size reduced")
+        );
+    }
+
+    /// Remaining size of the order in the order book.
+    public fun get_remaining_size(
+        self: &Market, user: address, order_id: u64
+    ): u64 {
+        self.order_book.get_remaining_size(user, order_id)
+    }
+
+    /// Returns all the pending orders that are ready to be executed based on the oracle price. The caller is responsible
+    /// for calling the `place_order_with_order_id` API to place each order with the order id returned from this API.
+    public fun take_ready_price_based_orders(
+        self: &mut Market, oracle_price: u64
+    ): vector> {
+        self.order_book.take_ready_price_based_orders(oracle_price)
+    }
+
+    /// Returns all the pending orders that are ready to be executed based on the current timestamp. The caller is responsible
+    /// for calling the `place_order_with_order_id` API to place each order with the order id returned from this API.
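+    /// A hedged sketch of the intended flow (local names are assumptions for the example):
+    ///     let ready_orders = market.take_ready_time_based_orders();
+    ///     // destructure each returned order and re-submit it with its original order id via
+    ///     // `place_order_with_order_id(...)` so that it can now match against the book.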
+ public fun take_ready_time_based_orders( + self: &mut Market + ): vector> { + self.order_book.take_ready_time_based_orders() + } + + // ============================= test_only APIs ==================================== + #[test_only] + public fun destroy_market(self: Market) { + let Market { + parent: _parent, + market: _market, + last_order_id: _last_order_id, + next_fill_id: _next_fill_id, + config, + order_book + } = self; + let MarketConfig { allow_self_trade: _, allow_events_emission: _ } = config; + order_book.destroy_order_book() + } + + #[test_only] + public fun is_clearinghouse_settle_violation( + cancellation_reason: OrderCancellationReason + ): bool { + if (cancellation_reason + == OrderCancellationReason::ClearinghouseSettleViolation) { + return true; + }; + false + } + + #[test_only] + public fun get_order_id_from_event(self: OrderEvent): u64 { + self.order_id + } + + #[test_only] + public fun verify_order_event( + self: OrderEvent, + order_id: u64, + market: address, + user: address, + orig_size: u64, + remaining_size: u64, + size_delta: u64, + price: u64, + is_buy: bool, + is_taker: bool, + status: u8 + ) { + assert!(self.order_id == order_id); + assert!(self.market == market); + assert!(self.user == user); + assert!(self.orig_size == orig_size); + assert!(self.remaining_size == remaining_size); + assert!(self.size_delta == size_delta); + assert!(self.price == price); + assert!(self.is_buy == is_buy); + assert!(self.is_taker == is_taker); + assert!(self.status == status); + } +} diff --git a/aptos-move/framework/aptos-experimental/sources/trading/market/market_types.move b/aptos-move/framework/aptos-experimental/sources/trading/market/market_types.move new file mode 100644 index 0000000000000..3d2251cc6ee74 --- /dev/null +++ b/aptos-move/framework/aptos-experimental/sources/trading/market/market_types.move @@ -0,0 +1,125 @@ +module aptos_experimental::market_types { + use std::option::Option; + use std::string::String; + + const EINVALID_ADDRESS: u64 = 1; + const EINVALID_SETTLE_RESULT: u64 = 2; + + struct SettleTradeResult has drop { + settled_size: u64, + maker_cancellation_reason: Option, + taker_cancellation_reason: Option + } + + struct MarketClearinghouseCallbacks has drop { + // settle_trade_f arguments: taker, maker, taker_order_id, maker_order_id, fill_id, is_taker_long, price, size + settle_trade_f: |address, address, u64, u64, u64, bool, u64, u64, M, M| SettleTradeResult has drop + copy, + // validate_settlement_update_f arguments: account, is_taker, is_long, price, size + validate_order_placement_f: |address, u64, bool, bool, u64, u64, M| bool has drop + copy, + // place_maker_order_f arguments: account, order_id, is_bid, price, size, order_metadata + place_maker_order_f: |address, u64, bool, u64, u64, M| has drop + copy, + // cleanup_order_f arguments: account, order_id, is_bid, remaining_size + cleanup_order_f: |address, u64, bool, u64| has drop + copy, + // decrease_order_size_f arguments: account, order_id, is_bid, price, size + decrease_order_size_f: |address, u64, bool, u64, u64| has drop + copy, + } + + public fun new_settle_trade_result( + settled_size: u64, + maker_cancellation_reason: Option, + taker_cancellation_reason: Option + ): SettleTradeResult { + SettleTradeResult { + settled_size, + maker_cancellation_reason, + taker_cancellation_reason + } + } + + public fun new_market_clearinghouse_callbacks( + // settle_trade_f arguments: taker, maker, taker_order_id, maker_order_id, fill_id, is_taker_long, price, size + settle_trade_f: |address, address, 
u64, u64, u64, bool, u64, u64, M, M| SettleTradeResult has drop + copy, + // validate_settlement_update_f arguments: accoun, is_taker, is_long, price, size + validate_order_placement_f: |address, u64, bool, bool, u64, u64, M| bool has drop + copy, + place_maker_order_f: |address, u64, bool, u64, u64, M| has drop + copy, + cleanup_order_f: |address, u64, bool, u64| has drop + copy, + decrease_order_size_f: |address, u64, bool, u64, u64| has drop + copy, + ): MarketClearinghouseCallbacks { + MarketClearinghouseCallbacks { + settle_trade_f, + validate_order_placement_f, + place_maker_order_f, + cleanup_order_f, + decrease_order_size_f + } + } + + public fun get_settled_size(self: &SettleTradeResult): u64 { + self.settled_size + } + + public fun get_maker_cancellation_reason(self: &SettleTradeResult): Option { + self.maker_cancellation_reason + } + + public fun get_taker_cancellation_reason(self: &SettleTradeResult): Option { + self.taker_cancellation_reason + } + + public fun settle_trade( + self: &MarketClearinghouseCallbacks, + taker: address, + maker: address, + taker_order_id: u64, + maker_order_id:u64, + fill_id: u64, + is_taker_long: bool, + price: u64, + size: u64, + taker_metadata: M, + maker_metadata: M): SettleTradeResult { + (self.settle_trade_f)(taker, maker, taker_order_id, maker_order_id, fill_id, is_taker_long, price, size, taker_metadata, maker_metadata) + } + + public fun validate_order_placement( + self: &MarketClearinghouseCallbacks, + account: address, + order_id: u64, + is_taker: bool, + is_bid: bool, + price: u64, + size: u64, + order_metadata: M): bool { + (self.validate_order_placement_f)(account, order_id, is_taker, is_bid, price, size, order_metadata) + } + + public fun place_maker_order( + self: &MarketClearinghouseCallbacks, + account: address, + order_id: u64, + is_bid: bool, + price: u64, + size: u64, + order_metadata: M) { + (self.place_maker_order_f)(account, order_id, is_bid, price, size, order_metadata) + } + + public fun cleanup_order( + self: &MarketClearinghouseCallbacks, + account: address, + order_id: u64, + is_bid: bool, + remaining_size: u64) { + (self.cleanup_order_f)(account, order_id, is_bid, remaining_size) + } + + public fun decrease_order_size( + self: &MarketClearinghouseCallbacks, + account: address, + order_id: u64, + is_bid: bool, + price: u64, + size: u64,) { + (self.decrease_order_size_f)(account, order_id, is_bid, price, size) + } +} diff --git a/aptos-move/framework/aptos-experimental/sources/trading/order_book/active_order_book.move b/aptos-move/framework/aptos-experimental/sources/trading/order_book/active_order_book.move new file mode 100644 index 0000000000000..660b52dde2726 --- /dev/null +++ b/aptos-move/framework/aptos-experimental/sources/trading/order_book/active_order_book.move @@ -0,0 +1,704 @@ +/// (work in progress) +module aptos_experimental::active_order_book { + use std::option::{Self, Option}; + use aptos_std::math64::mul_div; + use aptos_framework::big_ordered_map::BigOrderedMap; + use aptos_experimental::order_book_types::{ + OrderIdType, + UniqueIdxType, + new_active_matched_order, + ActiveMatchedOrder, + get_slippage_pct_precision, + new_default_big_ordered_map + }; + #[test_only] + use std::vector; + #[test_only] + use aptos_experimental::order_book_types::{new_order_id_type, new_unique_idx_type}; + + const EINVALID_MAKER_ORDER: u64 = 1; + /// There is a code bug that breaks internal invariant + const EINTERNAL_INVARIANT_BROKEN: u64 = 2; + + friend aptos_experimental::order_book; + + /// ========= Active OrderBook 
=========== + + // Active Order Book: + // bids: (order_id, price, unique_priority_idx, volume) + + // (price, unique_priority_idx) -> (volume, order_id) + + const U64_MAX: u64 = 0xffffffffffffffff; + + const U256_MAX: u256 = + 0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff; + // 115792089237316195423570985008687907853269984665640564039457584007913129639935; + + struct ActiveBidKey has store, copy, drop { + price: u64, + tie_breaker: UniqueIdxType + } + + struct ActiveBidData has store, copy, drop { + order_id: OrderIdType, + size: u64 + } + + /// OrderBook tracking active (i.e. unconditional, immediately executable) limit orders. + /// + /// - invariant - all buys are smaller than sells, at all times. + /// - tie_breaker in sells is U256_MAX-value, to make sure largest value in the book + /// that is taken first, is the one inserted first, amongst those with same bid price. + enum ActiveOrderBook has store { + V1 { + buys: BigOrderedMap, + sells: BigOrderedMap + } + } + + public fun new_active_order_book(): ActiveOrderBook { + // potentially add max value to both sides (that will be skipped), + // so that max_key never changes, and doesn't create conflict. + ActiveOrderBook::V1 { + buys: new_default_big_ordered_map(), + sells: new_default_big_ordered_map() + } + } + + + /// Picks the best (i.e. highest) bid (i.e. buy) price from the active order book. + /// aborts if there are no buys + public fun best_bid_price(self: &ActiveOrderBook): Option { + if (self.buys.is_empty()) { + option::none() + } else { + let (back_key, _back_value) = self.buys.borrow_back(); + option::some(back_key.price) + } + } + + /// Picks the best (i.e. lowest) ask (i.e. sell) price from the active order book. + /// aborts if there are no sells + public fun best_ask_price(self: &ActiveOrderBook): Option { + if (self.sells.is_empty()) { + option::none() + } else { + let (front_key, _front_value) = self.sells.borrow_front(); + option::some(front_key.price) + } + } + + public fun get_mid_price(self: &ActiveOrderBook): Option { + let best_bid = self.best_bid_price(); + let best_ask = self.best_ask_price(); + if (best_bid.is_none() || best_ask.is_none()) { + option::none() + } else { + option::some( + (best_bid.destroy_some() + best_ask.destroy_some()) / 2 + ) + } + } + + public fun get_slippage_price( + self: &ActiveOrderBook, is_buy: bool, slippage_pct: u64 + ): Option { + let mid_price = self.get_mid_price(); + if (mid_price.is_none()) { + return option::none(); + }; + let mid_price = mid_price.destroy_some(); + let slippage = mul_div( + mid_price, slippage_pct, get_slippage_pct_precision() * 100 + ); + if (is_buy) { + option::some(mid_price + slippage) + } else { + option::some(mid_price - slippage) + } + } + + // TODO check if keeping depth book is more efficient than computing impact prices manually + + fun get_impact_bid_price(self: &ActiveOrderBook, impact_size: u64): Option { + let total_value = (0 as u128); + let total_size = 0; + let orders = &self.buys; + if (orders.is_empty()) { + return option::none(); + }; + let (front_key, front_value) = orders.borrow_back(); + while (total_size < impact_size) { + let matched_size = + if (total_size + front_value.size > impact_size) { + impact_size - total_size + } else { + front_value.size + }; + total_value = total_value + + (matched_size as u128) * (front_key.price as u128); + total_size = total_size + matched_size; + let next_key = orders.prev_key(&front_key); + if (next_key.is_none()) { + // TODO maybe we should return none if there is not enough 
depth? + break; + }; + front_key = next_key.destroy_some(); + front_value = orders.borrow(&front_key); + }; + option::some((total_value / (total_size as u128)) as u64) + } + + fun get_impact_ask_price(self: &ActiveOrderBook, impact_size: u64): Option { + let total_value = 0 as u128; + let total_size = 0; + let orders = &self.sells; + if (orders.is_empty()) { + return option::none(); + }; + let (front_key, front_value) = orders.borrow_front(); + while (total_size < impact_size) { + let matched_size = + if (total_size + front_value.size > impact_size) { + impact_size - total_size + } else { + front_value.size + }; + total_value = total_value + + (matched_size as u128) * (front_key.price as u128); + total_size = total_size + matched_size; + let next_key = orders.next_key(&front_key); + if (next_key.is_none()) { + break; + }; + front_key = next_key.destroy_some(); + front_value = orders.borrow(&front_key); + }; + option::some((total_value / (total_size as u128)) as u64) + } + + inline fun get_tie_breaker( + unique_priority_idx: UniqueIdxType, is_buy: bool + ): UniqueIdxType { + if (is_buy) { + unique_priority_idx + } else { + unique_priority_idx.descending_idx() + } + } + + public fun cancel_active_order( + self: &mut ActiveOrderBook, + price: u64, + unique_priority_idx: UniqueIdxType, + is_buy: bool + ): u64 { + let tie_breaker = get_tie_breaker(unique_priority_idx, is_buy); + let key = ActiveBidKey { price: price, tie_breaker }; + let value = + if (is_buy) { + self.buys.remove(&key) + } else { + self.sells.remove(&key) + }; + value.size + } + + public fun is_active_order( + self: &ActiveOrderBook, + price: u64, + unique_priority_idx: UniqueIdxType, + is_buy: bool + ): bool { + let tie_breaker = get_tie_breaker(unique_priority_idx, is_buy); + let key = ActiveBidKey { price: price, tie_breaker }; + if (is_buy) { + self.buys.contains(&key) + } else { + self.sells.contains(&key) + } + } + + /// Check if the order is a taker order - i.e. if it can be immediately matched with the order book fully or partially. 
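+    /// For example (illustrative prices only), with a best ask of 100 resting in the book:
+    ///     self.is_taker_order(100, true) == true;  // a bid at 100 crosses the best ask
+    ///     self.is_taker_order(99, true) == false;  // a bid at 99 would rest as a maker order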
+ public fun is_taker_order( + self: &ActiveOrderBook, price: u64, is_buy: bool + ): bool { + if (is_buy) { + let best_ask_price = self.best_ask_price(); + best_ask_price.is_some() && price >= best_ask_price.destroy_some() + } else { + let best_bid_price = self.best_bid_price(); + best_bid_price.is_some() && price <= best_bid_price.destroy_some() + } + } + + fun single_match_with_current_active_order( + remaining_size: u64, + cur_key: ActiveBidKey, + cur_value: ActiveBidData, + orders: &mut BigOrderedMap + ): ActiveMatchedOrder { + let is_cur_match_fully_consumed = cur_value.size <= remaining_size; + + let matched_size_for_this_order = + if (is_cur_match_fully_consumed) { + cur_value.size + } else { + remaining_size + }; + + let result = + new_active_matched_order( + cur_value.order_id, + matched_size_for_this_order, // Matched size on the maker order + cur_value.size - matched_size_for_this_order // Remaining size on the maker order + ); + + if (is_cur_match_fully_consumed) { + orders.remove(&cur_key); + } else { + orders.borrow_mut(&cur_key).size -= matched_size_for_this_order; + }; + result + } + + fun get_single_match_for_buy_order( + self: &mut ActiveOrderBook, price: u64, size: u64 + ): ActiveMatchedOrder { + let (smallest_key, smallest_value) = self.sells.borrow_front(); + assert!(price >= smallest_key.price, EINTERNAL_INVARIANT_BROKEN); + single_match_with_current_active_order( + size, + smallest_key, + *smallest_value, + &mut self.sells + ) + } + + fun get_single_match_for_sell_order( + self: &mut ActiveOrderBook, price: u64, size: u64 + ): ActiveMatchedOrder { + let (largest_key, largest_value) = self.buys.borrow_back(); + assert!(price <= largest_key.price, EINTERNAL_INVARIANT_BROKEN); + single_match_with_current_active_order( + size, + largest_key, + *largest_value, + &mut self.buys + ) + } + + public fun get_single_match_result( + self: &mut ActiveOrderBook, + price: u64, + size: u64, + is_buy: bool + ): ActiveMatchedOrder { + if (is_buy) { + self.get_single_match_for_buy_order(price, size) + } else { + self.get_single_match_for_sell_order(price, size) + } + } + + /// Increase the size of the order in the orderbook without altering its position in the price-time priority. + public fun increase_order_size( + self: &mut ActiveOrderBook, + price: u64, + unique_priority_idx: UniqueIdxType, + size_delta: u64, + is_buy: bool + ) { + let tie_breaker = get_tie_breaker(unique_priority_idx, is_buy); + let key = ActiveBidKey { price, tie_breaker }; + if (is_buy) { + self.buys.borrow_mut(&key).size += size_delta; + } else { + self.sells.borrow_mut(&key).size += size_delta; + }; + } + + /// Decrease the size of the order in the order book without altering its position in the price-time priority. 
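+    /// For example (values assumed for illustration): given a resting bid of size 100 at price 50 with priority
+    /// index `idx`, calling
+    ///     self.decrease_order_size(50, idx, 30, true);
+    /// leaves a bid of size 70 at the same position in the price-time queue.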
+ public fun decrease_order_size( + self: &mut ActiveOrderBook, + price: u64, + unique_priority_idx: UniqueIdxType, + size_delta: u64, + is_buy: bool + ) { + let tie_breaker = get_tie_breaker(unique_priority_idx, is_buy); + let key = ActiveBidKey { price, tie_breaker }; + if (is_buy) { + self.buys.borrow_mut(&key).size -= size_delta; + } else { + self.sells.borrow_mut(&key).size -= size_delta; + }; + } + + public fun place_maker_order( + self: &mut ActiveOrderBook, + order_id: OrderIdType, + price: u64, + unique_priority_idx: UniqueIdxType, + size: u64, + is_buy: bool + ) { + let tie_breaker = get_tie_breaker(unique_priority_idx, is_buy); + let key = ActiveBidKey { price, tie_breaker }; + let value = ActiveBidData { order_id, size }; + // Assert that this is not a taker order + assert!(!self.is_taker_order(price, is_buy), EINVALID_MAKER_ORDER); + if (is_buy) { + self.buys.add(key, value); + } else { + self.sells.add(key, value); + }; + } + + #[test_only] + public fun destroy_active_order_book(self: ActiveOrderBook) { + let ActiveOrderBook::V1 { sells, buys } = self; + sells.destroy(|_v| {}); + buys.destroy(|_v| {}); + } + + #[test_only] + struct TestOrder has copy, drop { + account: address, + account_order_id: u64, + price: u64, + size: u64, + unique_idx: UniqueIdxType, + is_buy: bool + } + + #[test_only] + fun place_test_order(self: &mut ActiveOrderBook, order: TestOrder): + vector { + let result = vector::empty(); + let remaining_size = order.size; + while (remaining_size > 0) { + if (!self.is_taker_order(order.price, order.is_buy)) { + self.place_maker_order( + new_order_id_type(order.account, order.account_order_id), + order.price, + order.unique_idx, + order.size, + order.is_buy + ); + return result; + }; + let match_result = + self.get_single_match_result(order.price, remaining_size, order.is_buy); + remaining_size -= match_result.get_active_matched_size(); + result.push_back(match_result); + }; + result + } + + #[test] + // TODO (skedia) Add more comprehensive tests for the acive order book + fun test_active_order_book() { + let active_order_book = new_active_order_book(); + + assert!(active_order_book.best_bid_price().is_none()); + assert!(active_order_book.best_ask_price().is_none()); + + // $200 - 10000 + // -- + let match_result = + active_order_book.place_test_order( + TestOrder { + account: @0xAA, + account_order_id: 0, + price: 200, + size: 1000, + unique_idx: new_unique_idx_type(0), + is_buy: false + } + ); + assert!(match_result.is_empty()); + + // $200 - 10000 + // -- + // $100 - 1000 + let match_result = + active_order_book.place_test_order( + TestOrder { + account: @0xAA, + account_order_id: 1, + price: 100, + size: 1000, + unique_idx: new_unique_idx_type(1), + is_buy: true + } + ); + assert!(match_result.is_empty()); + + assert!(active_order_book.best_bid_price().destroy_some() == 100); + assert!(active_order_book.best_ask_price().destroy_some() == 200); + + // $200 - 10000 + // $150 - 100 + // -- + // $100 - 1000 + let match_result = + active_order_book.place_test_order( + TestOrder { + account: @0xAA, + account_order_id: 2, + price: 150, + size: 100, + unique_idx: new_unique_idx_type(2), + is_buy: false + } + ); + assert!(match_result.is_empty()); + + // $200 - 10000 + // $175 - 100 + // $150 - 100 + // -- + // $100 - 1000 + let match_result = + active_order_book.place_test_order( + TestOrder { + account: @0xAA, + account_order_id: 3, + price: 175, + size: 100, + unique_idx: new_unique_idx_type(3), + is_buy: false + } + ); + assert!(match_result.is_empty()); + + 
assert!(active_order_book.best_bid_price().destroy_some() == 100); + assert!(active_order_book.best_ask_price().destroy_some() == 150); + + // $200 - 10000 + // $175 - 100 + // $150 - 50 <-- match 50 units + // -- + // $100 - 1000 + let match_result = + active_order_book.place_test_order( + TestOrder { + account: @0xAA, + account_order_id: 4, + price: 160, + size: 50, + unique_idx: new_unique_idx_type(4), + is_buy: true + } + ); + assert!(match_result.length() == 1); + // TODO - seems like we have no match price in ActiveMatchResult any more + // we need to add it back, and assert? + // Maker ask order was partially filled 100 -> 50 + assert!( + match_result + == vector[ + new_active_matched_order( + new_order_id_type(@0xAA, 2), + 50, // matched size + 50 // remaining size + ) + ], + 7 + ); + active_order_book.destroy_active_order_book(); + } + + #[test] + fun test_get_impact_sell_price() { + let active_order_book = new_active_order_book(); + + // Add sell orders at different prices + active_order_book.place_test_order( + TestOrder { + account: @0xAA, + account_order_id: 1, + price: 100, + size: 50, + unique_idx: new_unique_idx_type(1), + is_buy: false + } + ); + + active_order_book.place_test_order( + TestOrder { + account: @0xAA, + account_order_id: 2, + price: 150, + size: 100, + unique_idx: new_unique_idx_type(2), + is_buy: false + } + ); + + active_order_book.place_test_order( + TestOrder { + account: @0xAA, + account_order_id: 3, + price: 200, + size: 150, + unique_idx: new_unique_idx_type(3), + is_buy: false + } + ); + + // Test impact price calculations + // Impact size 50 should give price of lowest order (100) + assert!(active_order_book.get_impact_ask_price(50).destroy_some() == 100, 1); + + // Impact size 100 should give weighted average of first two orders + // (50 * 100 + 50 * 150) / 100 = 125 + assert!(active_order_book.get_impact_ask_price(100).destroy_some() == 125, 2); + + // Impact size 200 should give weighted average of all orders + // (50 * 100 + 100 * 150 + 50 * 200) / 200 = 150 + assert!(active_order_book.get_impact_ask_price(200).destroy_some() == 150, 3); + + // Impact size larger than total available should still use all orders + // (50 * 100 + 100 * 150 + 150 * 200) / 300 = 166 + assert!(active_order_book.get_impact_ask_price(1000).destroy_some() == 166, 4); + + active_order_book.destroy_active_order_book(); + } + + #[test] + fun test_get_impact_bid_price() { + let active_order_book = new_active_order_book(); + + // Place test buy orders at different prices + active_order_book.place_test_order( + TestOrder { + account: @0xAA, + account_order_id: 1, + price: 200, + size: 50, + unique_idx: new_unique_idx_type(1), + is_buy: true + } + ); + + active_order_book.place_test_order( + TestOrder { + account: @0xAA, + account_order_id: 2, + price: 150, + size: 100, + unique_idx: new_unique_idx_type(2), + is_buy: true + } + ); + + active_order_book.place_test_order( + TestOrder { + account: @0xAA, + account_order_id: 3, + price: 100, + size: 150, + unique_idx: new_unique_idx_type(3), + is_buy: true + } + ); + + // Test impact price calculations + // Impact size 50 should give price of first order (200) + assert!(active_order_book.get_impact_bid_price(50).destroy_some() == 200, 1); + + // Impact size 100 should give weighted average of first two orders + // (50 * 200 + 50 * 150) / 100 = 175 + assert!(active_order_book.get_impact_bid_price(100).destroy_some() == 175, 2); + + // Impact size 200 should give weighted average of all orders + // (50 * 200 + 100 * 150 + 50 * 100) / 
200 = 150 + assert!(active_order_book.get_impact_bid_price(200).destroy_some() == 150, 3); + + // Impact size larger than total available should still use all orders + // (50 * 200 + 100 * 150 + 150 * 100) / 300 = 133 + assert!(active_order_book.get_impact_bid_price(1000).destroy_some() == 133, 4); + + active_order_book.destroy_active_order_book(); + } + + #[test] + fun test_get_slippage_price() { + let active_order_book = new_active_order_book(); + + // Add sell orders at different prices + active_order_book.place_test_order( + TestOrder { + account: @0xAA, + account_order_id: 1, + price: 101, + size: 50, + unique_idx: new_unique_idx_type(1), + is_buy: false + } + ); + + active_order_book.place_test_order( + TestOrder { + account: @0xAA, + account_order_id: 2, + price: 102, + size: 100, + unique_idx: new_unique_idx_type(2), + is_buy: false + } + ); + + active_order_book.place_test_order( + TestOrder { + account: @0xAA, + account_order_id: 3, + price: 103, + size: 150, + unique_idx: new_unique_idx_type(3), + is_buy: false + } + ); + + // Add some buy orders + active_order_book.place_test_order( + TestOrder { + account: @0xAA, + account_order_id: 4, + price: 99, + size: 50, + unique_idx: new_unique_idx_type(4), + is_buy: true + } + ); + + active_order_book.place_test_order( + TestOrder { + account: @0xAA, + account_order_id: 5, + price: 98, + size: 100, + unique_idx: new_unique_idx_type(5), + is_buy: true + } + ); + + // Test slippage price calculations + assert!(active_order_book.get_mid_price().destroy_some() == 100); + // Slippage 10% for buy order should give price of mid price (100) + 10% = 110 + assert!(active_order_book.get_slippage_price(true, 1000).destroy_some() == 110); + assert!(active_order_book.get_slippage_price(true, 100).destroy_some() == 101); + assert!(active_order_book.get_slippage_price(true, 10).destroy_some() == 100); + + assert!(active_order_book.get_slippage_price(false, 1500).destroy_some() == 85); + assert!(active_order_book.get_slippage_price(false, 100).destroy_some() == 99); + assert!(active_order_book.get_slippage_price(false, 10).destroy_some() == 100); + assert!(active_order_book.get_slippage_price(false, 0).destroy_some() == 100); + + active_order_book.destroy_active_order_book(); + + } +} diff --git a/aptos-move/framework/aptos-experimental/sources/trading/order_book/order_book.move b/aptos-move/framework/aptos-experimental/sources/trading/order_book/order_book.move new file mode 100644 index 0000000000000..9382ec7f33d59 --- /dev/null +++ b/aptos-move/framework/aptos-experimental/sources/trading/order_book/order_book.move @@ -0,0 +1,1339 @@ +/// This module provides a core order book functionality for a trading system. On a high level, it has three major +/// components +/// 1. ActiveOrderBook: This is the main order book that keeps track of active orders and their states. The active order +/// book is backed by a BigOrderedMap, which is a data structure that allows for efficient insertion, deletion, and matching of the order +/// The orders are matched based on time-price priority. +/// 2. PendingOrderBookIndex: This keeps track of pending orders. The pending orders are those that are not active yet. Three +/// types of pending orders are supported. +/// - Price move up - Trigggered when the price moves above a certain price level +/// - Price move down - Triggered when the price moves below a certain price level +/// - Time based - Triggered when a certain time has passed +/// 3. Orders: This is a BigOrderMap of order id to order details. 
+/// +module aptos_experimental::order_book { + use std::vector; + use std::error; + use std::option::{Self, Option}; + use aptos_framework::big_ordered_map::BigOrderedMap; + + use aptos_experimental::order_book_types::{ + OrderIdType, + OrderWithState, + generate_unique_idx_fifo_tiebraker, + new_order_id_type, + new_order, + new_order_with_state, + new_single_order_match, + new_default_big_ordered_map, + TriggerCondition, + UniqueIdxType, + SingleOrderMatch, + Order + }; + use aptos_experimental::active_order_book::{ActiveOrderBook, new_active_order_book}; + use aptos_experimental::pending_order_book_index::{ + PendingOrderBookIndex, + new_pending_order_book_index + }; + #[test_only] + use aptos_experimental::order_book_types::tp_trigger_condition; + + const U256_MAX: u256 = + 0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff; + + const EORDER_ALREADY_EXISTS: u64 = 1; + const EPOST_ONLY_FILLED: u64 = 2; + const EORDER_NOT_FOUND: u64 = 4; + const EINVALID_INACTIVE_ORDER_STATE: u64 = 5; + const EINVALID_ADD_SIZE_TO_ORDER: u64 = 6; + const E_NOT_ACTIVE_ORDER: u64 = 7; + + struct OrderRequest has copy, drop { + account: address, + account_order_id: u64, + unique_priority_idx: Option, + price: u64, + orig_size: u64, + remaining_size: u64, + is_buy: bool, + trigger_condition: Option, + metadata: M + } + + enum OrderBook has store { + V1 { + orders: BigOrderedMap>, + active_orders: ActiveOrderBook, + pending_orders: PendingOrderBookIndex + } + } + + enum OrderType has store, drop, copy { + GoodTilCancelled, + PostOnly, + FillOrKill + } + + public fun new_order_request( + account: address, + account_order_id: u64, + unique_priority_idx: Option, + price: u64, + orig_size: u64, + remaining_size: u64, + is_buy: bool, + trigger_condition: Option, + metadata: M + ): OrderRequest { + OrderRequest { + account, + account_order_id, + unique_priority_idx, + price, + orig_size, + remaining_size, + is_buy, + trigger_condition, + metadata + } + } + + public fun new_order_book(): OrderBook { + OrderBook::V1 { + orders: new_default_big_ordered_map(), + active_orders: new_active_order_book(), + pending_orders: new_pending_order_book_index() + } + } + + + + /// Cancels an order from the order book. If the order is active, it is removed from the active order book else + /// it is removed from the pending order book. The API doesn't abort if the order is not found in the order book - + /// this is a TODO for now. + public fun cancel_order( + self: &mut OrderBook, account: address, account_order_id: u64 + ): Option> { + let order_id = new_order_id_type(account, account_order_id); + assert!(self.orders.contains(&order_id), EORDER_NOT_FOUND); + let order_with_state = self.orders.remove(&order_id); + let (order, is_active) = order_with_state.destroy_order_from_state(); + if (is_active) { + let (_, unique_priority_idx, bid_price, _orig_size, _size, is_buy, _, _) = + order.destroy_order(); + self.active_orders.cancel_active_order(bid_price, unique_priority_idx, is_buy); + } else { + let ( + _, + unique_priority_idx, + _bid_price, + _orig_size, + _size, + is_buy, + trigger_condition, + _ + ) = order.destroy_order(); + self.pending_orders.cancel_pending_order( + trigger_condition.destroy_some(), unique_priority_idx, is_buy + ); + }; + return option::some(order) + } + + /// Checks if the order is a taker order i.e., matched immediatedly with the active order book. 
+ public fun is_taker_order( + self: &OrderBook, + price: u64, + is_buy: bool, + trigger_condition: Option + ): bool { + if (trigger_condition.is_some()) { + return false; + }; + return self.active_orders.is_taker_order(price, is_buy) + } + + /// Places a maker order to the order book. If the order is a pending order, it is added to the pending order book + /// else it is added to the active order book. The API aborts if its not a maker order or if the order already exists + public fun place_maker_order( + self: &mut OrderBook, order_req: OrderRequest + ) { + if (order_req.trigger_condition.is_some()) { + return self.place_pending_maker_order(order_req); + }; + + let order_id = new_order_id_type(order_req.account, order_req.account_order_id); + let unique_priority_idx = + if (order_req.unique_priority_idx.is_some()) { + order_req.unique_priority_idx.destroy_some() + } else { + generate_unique_idx_fifo_tiebraker() + }; + + assert!( + !self.orders.contains(&order_id), + error::invalid_argument(EORDER_ALREADY_EXISTS) + ); + + let order = + new_order( + order_id, + unique_priority_idx, + order_req.price, + order_req.orig_size, + order_req.remaining_size, + order_req.is_buy, + order_req.trigger_condition, + order_req.metadata + ); + self.orders.add(order_id, new_order_with_state(order, true)); + self.active_orders.place_maker_order( + order_id, + order_req.price, + unique_priority_idx, + order_req.remaining_size, + order_req.is_buy + ); + } + + /// Reinserts a maker order to the order book. This is used when the order is removed from the order book + /// but the clearinghouse fails to settle all or part of the order. If the order doesn't exist in the order book, + /// it is added to the order book, if it exists, it's size is updated. + public fun reinsert_maker_order( + self: &mut OrderBook, order_req: OrderRequest + ) { + assert!(order_req.trigger_condition.is_none(), E_NOT_ACTIVE_ORDER); + let order_id = new_order_id_type(order_req.account, order_req.account_order_id); + if (!self.orders.contains(&order_id)) { + return self.place_maker_order(order_req); + }; + let order_with_state = self.orders.remove(&order_id); + order_with_state.increase_remaining_size(order_req.remaining_size); + self.orders.add(order_id, order_with_state); + self.active_orders.increase_order_size( + order_req.price, + order_req.unique_priority_idx.destroy_some(), + order_req.remaining_size, + order_req.is_buy + ); + } + + fun place_pending_maker_order( + self: &mut OrderBook, order_req: OrderRequest + ) { + let order_id = new_order_id_type(order_req.account, order_req.account_order_id); + let unique_priority_idx = + if (order_req.unique_priority_idx.is_some()) { + order_req.unique_priority_idx.destroy_some() + } else { + generate_unique_idx_fifo_tiebraker() + }; + let order = + new_order( + order_id, + unique_priority_idx, + order_req.price, + order_req.orig_size, + order_req.remaining_size, + order_req.is_buy, + order_req.trigger_condition, + order_req.metadata + ); + + self.orders.add(order_id, new_order_with_state(order, false)); + + self.pending_orders.place_pending_maker_order( + order_id, + order_req.trigger_condition.destroy_some(), + unique_priority_idx, + order_req.is_buy + ); + } + + /// Returns a single match for a taker order. It is responsibility of the caller to first call the `is_taker_order` + /// API to ensure that the order is a taker order before calling this API, otherwise it will abort. 
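+    /// A minimal matching-loop sketch (the local variables and the settlement step are assumptions for the example):
+    ///     while (remaining_size > 0 && order_book.is_taker_order(price, is_buy, option::none())) {
+    ///         let single_match = order_book.get_single_match_for_taker(price, remaining_size, is_buy);
+    ///         remaining_size -= single_match.get_matched_size();
+    ///         // settle `single_match` against the clearinghouse here
+    ///     };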
+ public fun get_single_match_for_taker( + self: &mut OrderBook, + price: u64, + size: u64, + is_buy: bool + ): SingleOrderMatch { + let result = self.active_orders.get_single_match_result(price, size, is_buy); + let (order_id, matched_size, remaining_size) = + result.destroy_active_matched_order(); + let order_with_state = self.orders.remove(&order_id); + order_with_state.set_remaining_size(remaining_size); + if (remaining_size > 0) { + self.orders.add(order_id, order_with_state); + }; + let (order, is_active) = order_with_state.destroy_order_from_state(); + assert!(is_active, EINVALID_INACTIVE_ORDER_STATE); + new_single_order_match(order, matched_size) + } + + /// Decrease the size of the order by the given size delta. The API aborts if the order is not found in the order book or + /// if the size delta is greater than or equal to the remaining size of the order. Please note that the API will abort and + /// not cancel the order if the size delta is equal to the remaining size of the order, to avoid unintended + /// cancellation of the order. Please use the `cancel_order` API to cancel the order. + public fun decrease_order_size( + self: &mut OrderBook, account: address, account_order_id: u64, size_delta: u64 + ) { + let order_id = new_order_id_type(account, account_order_id); + assert!(self.orders.contains(&order_id), EORDER_NOT_FOUND); + let order_with_state = self.orders.remove(&order_id); + order_with_state.decrease_remaining_size(size_delta); + if (order_with_state.is_active_order()) { + let order = order_with_state.get_order_from_state(); + self.active_orders.decrease_order_size( + order.get_price(), + order_with_state.get_unique_priority_idx_from_state(), + size_delta, + order.is_bid() + ); + }; + self.orders.add(order_id, order_with_state); + } + + public fun is_active_order( + self: &OrderBook, account: address, account_order_id: u64 + ): bool { + let order_id = new_order_id_type(account, account_order_id); + if (!self.orders.contains(&order_id)) { + return false; + }; + self.orders.borrow(&order_id).is_active_order() + } + + public fun get_order( + self: &OrderBook, account: address, account_order_id: u64 + ): Option> { + let order_id = new_order_id_type(account, account_order_id); + if (!self.orders.contains(&order_id)) { + return option::none(); + }; + option::some(*self.orders.borrow(&order_id)) + } + + public fun get_remaining_size( + self: &OrderBook, account: address, account_order_id: u64 + ): u64 { + let order_id = new_order_id_type(account, account_order_id); + if (!self.orders.contains(&order_id)) { + return 0; + }; + self.orders.borrow(&order_id).get_remaining_size_from_state() + } + + /// Removes and returns the orders that are ready to be executed based on the current price. 
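+    /// Illustrative sketch (the oracle price 100 is an assumed example value):
+    ///     let ready_orders = order_book.take_ready_price_based_orders(100);
+    ///     // each returned `Order` still carries its original order id; destructure it and re-place it,
+    ///     // as the test-only `trigger_pending_orders` helper below does.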
+ public fun take_ready_price_based_orders( + self: &mut OrderBook, current_price: u64 + ): vector> { + let self_orders = &mut self.orders; + let order_ids = self.pending_orders.take_ready_price_based_orders(current_price); + let orders = vector::empty(); + + order_ids.for_each(|order_id| { + let order_with_state = self_orders.remove(&order_id); + let (order, _) = order_with_state.destroy_order_from_state(); + orders.push_back(order); + }); + orders + } + + public fun best_bid_price(self: &OrderBook): Option { + self.active_orders.best_bid_price() + } + + public fun best_ask_price(self: &OrderBook): Option { + self.active_orders.best_ask_price() + } + + public fun get_slippage_price( + self: &OrderBook, is_buy: bool, slippage_pct: u64 + ): Option { + self.active_orders.get_slippage_price(is_buy, slippage_pct) + } + + /// Removes and returns the orders that are ready to be executed based on the time condition. + public fun take_ready_time_based_orders( + self: &mut OrderBook + ): vector> { + let self_orders = &mut self.orders; + let order_ids = self.pending_orders.take_time_time_based_orders(); + let orders = vector::empty(); + + order_ids.for_each(|order_id| { + let order_with_state = self_orders.remove(&order_id); + let (order, _) = order_with_state.destroy_order_from_state(); + orders.push_back(order); + }); + orders + } + + // ============================= test_only APIs ==================================== + + #[test_only] + public fun destroy_order_book(self: OrderBook) { + let OrderBook::V1 { orders, active_orders, pending_orders } = self; + orders.destroy(|_v| {}); + active_orders.destroy_active_order_book(); + pending_orders.destroy_pending_order_book_index(); + } + + #[test_only] + public fun get_unique_priority_idx( + self: &OrderBook, account: address, account_order_id: u64 + ): Option { + let order_id = new_order_id_type(account, account_order_id); + if (!self.orders.contains(&order_id)) { + return option::none(); + }; + option::some(self.orders.borrow(&order_id).get_unique_priority_idx_from_state()) + } + + public fun place_order_and_get_matches( + self: &mut OrderBook, order_req: OrderRequest + ): vector> { + let match_results = vector::empty(); + let remainig_size = order_req.remaining_size; + while (remainig_size > 0) { + if (!self.is_taker_order(order_req.price, order_req.is_buy, order_req.trigger_condition)) { + self.place_maker_order( + OrderRequest { + account: order_req.account, + account_order_id: order_req.account_order_id, + unique_priority_idx: option::none(), + price: order_req.price, + orig_size: order_req.orig_size, + remaining_size: remainig_size, + is_buy: order_req.is_buy, + trigger_condition: order_req.trigger_condition, + metadata: order_req.metadata + } + ); + return match_results; + }; + let match_result = + self.get_single_match_for_taker( + order_req.price, remainig_size, order_req.is_buy + ); + let matched_size = match_result.get_matched_size(); + match_results.push_back(match_result); + remainig_size -= matched_size; + }; + return match_results + } + + #[test_only] + public fun update_order_and_get_matches( + self: &mut OrderBook, order_req: OrderRequest + ): vector> { + let unique_priority_idx = + self.get_unique_priority_idx(order_req.account, order_req.account_order_id); + assert!(unique_priority_idx.is_some(), EORDER_NOT_FOUND); + let unique_priority_idx = unique_priority_idx.destroy_some(); + self.cancel_order(order_req.account, order_req.account_order_id); + let order_req = OrderRequest { + account: order_req.account, + account_order_id: 
order_req.account_order_id, + unique_priority_idx: option::some(unique_priority_idx), + price: order_req.price, + orig_size: order_req.orig_size, + remaining_size: order_req.remaining_size, + is_buy: order_req.is_buy, + trigger_condition: order_req.trigger_condition, + metadata: order_req.metadata + }; + self.place_order_and_get_matches(order_req) + } + + #[test_only] + public fun trigger_pending_orders( + self: &mut OrderBook, oracle_price: u64 + ): vector> { + let ready_orders = self.take_ready_price_based_orders(oracle_price); + let all_matches = vector::empty(); + let i = 0; + while (i < ready_orders.length()) { + let order = ready_orders[i]; + let ( + order_id, + unique_priority_idx, + price, + orig_size, + remaining_size, + is_buy, + _, + metadata + ) = order.destroy_order(); + let (account, account_order_id) = order_id.destroy_order_id_type(); + let order_req = OrderRequest { + account, + account_order_id, + unique_priority_idx: option::some(unique_priority_idx), + price, + orig_size, + remaining_size, + is_buy, + trigger_condition: option::none(), + metadata + }; + let match_results = self.place_order_and_get_matches(order_req); + all_matches.append(match_results); + i = i + 1; + }; + all_matches + } + + #[test_only] + public fun total_matched_size( + match_results: &vector> + ): u64 { + let total_matched_size = 0; + let i = 0; + while (i < match_results.length()) { + total_matched_size = total_matched_size + + match_results[i].get_matched_size(); + i = i + 1; + }; + total_matched_size + } + + struct TestMetadata has store, copy, drop {} + + // ============================= Tests ==================================== + + #[test] + fun test_good_til_cancelled_order() { + let order_book = new_order_book(); + + // Place a GTC sell order + let order_req = OrderRequest { + account: @0xAA, + account_order_id: 1, + unique_priority_idx: option::none(), + price: 100, + orig_size: 1000, + remaining_size: 1000, + is_buy: false, + trigger_condition: option::none(), + metadata: TestMetadata {} + }; + let match_results = order_book.place_order_and_get_matches(order_req); + assert!(match_results.is_empty()); // No matches for first order + + // Verify order exists and is active + let order_id = new_order_id_type(@0xAA, 1); + let order_state = *order_book.orders.borrow(&order_id); + let (order, is_active) = order_state.destroy_order_from_state(); + let (_order_id, _unique_priority_idx, price, orig_size, size, is_buy, _, _) = + order.destroy_order(); + assert!(is_active == true); + assert!(price == 100); + assert!(orig_size == 1000); + assert!(size == 1000); + assert!(is_buy == false); + + // Place a matching buy order for partial fill + let match_results = + order_book.place_order_and_get_matches( + OrderRequest { + account: @0xBB, + account_order_id: 1, + unique_priority_idx: option::none(), + price: 100, + orig_size: 400, + remaining_size: 400, + is_buy: true, + trigger_condition: option::none(), + metadata: TestMetadata {} + } + ); + // // Verify taker match details + assert!(total_matched_size(&match_results) == 400); + assert!(order_book.get_remaining_size(@0xBB, 1) == 0); + + // Verify maker match details + assert!(match_results.length() == 1); // One match result + let maker_match = match_results[0]; + let (order, matched_size) = maker_match.destroy_single_order_match(); + assert!(order.get_order_id() == new_order_id_type(@0xAA, 1)); + assert!(matched_size == 400); + assert!(order.get_orig_size() == 1000); + assert!(order.get_remaining_size() == 600); // Maker order partially filled + + // 
Verify original order still exists but with reduced size + let order_state = *order_book.orders.borrow(&order_id); + let (order, is_active) = order_state.destroy_order_from_state(); + let (_, _unique_priority_idx, price, orig_size, size, is_buy, _, _) = + order.destroy_order(); + assert!(is_active == true); + assert!(price == 100); + assert!(orig_size == 1000); + assert!(size == 600); + assert!(is_buy == false); + + // Cancel the remaining order + order_book.cancel_order(@0xAA, 1); + + // Verify order no longer exists + assert!(order_book.get_remaining_size(@0xAA, 1) == 0); + + // Since we cannot drop the order book, we move it to a test struct + order_book.destroy_order_book(); + } + + #[test] + fun test_update_buy_order() { + let order_book = new_order_book(); + + // Place a GTC sell order + let match_results = + order_book.place_order_and_get_matches( + OrderRequest { + account: @0xAA, + account_order_id: 1, + unique_priority_idx: option::none(), + price: 101, + orig_size: 1000, + remaining_size: 1000, + is_buy: false, + trigger_condition: option::none(), + metadata: TestMetadata {} + } + ); + assert!(match_results.is_empty()); + + let match_results = + order_book.place_order_and_get_matches( + OrderRequest { + account: @0xBB, + account_order_id: 1, + unique_priority_idx: option::none(), + price: 100, + orig_size: 500, + remaining_size: 500, + is_buy: true, + trigger_condition: option::none(), + metadata: TestMetadata {} + } + ); + assert!(match_results.is_empty()); + + // Update the order so that it would match immediately + let match_results = + order_book.place_order_and_get_matches( + OrderRequest { + account: @0xBB, + account_order_id: 2, + unique_priority_idx: option::none(), + price: 101, + orig_size: 500, + remaining_size: 500, + is_buy: true, + trigger_condition: option::none(), + metadata: TestMetadata {} + } + ); + + // Verify taker (buy order) was fully filled + assert!(total_matched_size(&match_results) == 500); + assert!(order_book.get_remaining_size(@0xBB, 2) == 0); + + assert!(match_results.length() == 1); + let maker_match = match_results[0]; + let (order, matched_size) = maker_match.destroy_single_order_match(); + assert!(order.get_order_id() == new_order_id_type(@0xAA, 1)); + assert!(matched_size == 500); + assert!(order.get_orig_size() == 1000); + assert!(order.get_remaining_size() == 500); // Partial fill + + order_book.destroy_order_book(); + } + + #[test] + fun test_update_sell_order() { + let order_book = new_order_book(); + + // Place a GTC sell order + let order_req = OrderRequest { + account: @0xAA, + account_order_id: 1, + unique_priority_idx: option::none(), + price: 100, + orig_size: 1000, + remaining_size: 1000, + is_buy: false, + trigger_condition: option::none(), + metadata: TestMetadata {} + }; + let match_result = order_book.place_order_and_get_matches(order_req); + assert!(match_result.is_empty()); // No matches for first order + + // Place a buy order at lower price + let match_result = + order_book.place_order_and_get_matches( + OrderRequest { + account: @0xBB, + account_order_id: 1, + unique_priority_idx: option::none(), + price: 99, + orig_size: 500, + remaining_size: 500, + is_buy: true, + trigger_condition: option::none(), + metadata: TestMetadata {} + } + ); + assert!(match_result.is_empty()); + + // Update sell order to match with buy order + let match_results = + order_book.update_order_and_get_matches( + OrderRequest { + account: @0xAA, + account_order_id: 1, + unique_priority_idx: option::none(), + price: 99, + orig_size: 1000, + 
remaining_size: 1000, + is_buy: false, + trigger_condition: option::none(), + metadata: TestMetadata {} + } + ); + + // Verify taker (sell order) was partially filled + assert!(total_matched_size(&match_results) == 500); + + assert!(match_results.length() == 1); // One match result + let maker_match = match_results[0]; + let (order, matched_size) = maker_match.destroy_single_order_match(); + assert!(order.get_order_id() == new_order_id_type(@0xBB, 1)); + assert!(matched_size == 500); + assert!(order.get_orig_size() == 500); + assert!(order.get_remaining_size() == 0); // Fully filled + + order_book.destroy_order_book(); + } + + #[test] + #[expected_failure(abort_code = EORDER_NOT_FOUND)] + fun test_update_order_not_found() { + let order_book = new_order_book(); + + // Place a GTC sell order + let match_result = + order_book.place_order_and_get_matches( + OrderRequest { + account: @0xAA, + account_order_id: 1, + unique_priority_idx: option::none(), + price: 101, + orig_size: 1000, + remaining_size: 1000, + is_buy: false, + trigger_condition: option::none(), + metadata: TestMetadata {} + } + ); + assert!(match_result.is_empty()); // No matches for first order + + let match_result = + order_book.place_order_and_get_matches( + OrderRequest { + account: @0xBB, + account_order_id: 1, + unique_priority_idx: option::none(), + price: 100, + orig_size: 500, + remaining_size: 500, + is_buy: true, + trigger_condition: option::none(), + metadata: TestMetadata {} + } + ); + assert!(match_result.is_empty()); + + // Try to update non existant order + let match_result = + order_book.update_order_and_get_matches( + OrderRequest { + account: @0xBB, + account_order_id: 3, + unique_priority_idx: option::none(), + price: 100, + orig_size: 500, + remaining_size: 500, + is_buy: true, + trigger_condition: option::none(), + metadata: TestMetadata {} + } + ); + // This should fail with EORDER_NOT_FOUND + assert!(match_result.is_empty()); + order_book.destroy_order_book(); + } + + #[test] + fun test_good_til_cancelled_partial_fill() { + let order_book = new_order_book(); + + // Place a GTC sell order for 1000 units at price 100 + let match_result = + order_book.place_order_and_get_matches( + OrderRequest { + account: @0xAA, + account_order_id: 1, + unique_priority_idx: option::none(), + price: 100, + orig_size: 1000, + remaining_size: 1000, + is_buy: false, + trigger_condition: option::none(), + metadata: TestMetadata {} + } + ); + assert!(match_result.is_empty()); // No matches for first order + + // Place a smaller buy order (400 units) at the same price + let match_results = + order_book.place_order_and_get_matches( + OrderRequest { + account: @0xBB, + account_order_id: 1, + unique_priority_idx: option::none(), + price: 100, + orig_size: 400, + remaining_size: 400, + is_buy: true, + trigger_condition: option::none(), + metadata: TestMetadata {} + } + ); + + // Verify taker (buy order) was fully filled + assert!(total_matched_size(&match_results) == 400); + + // Verify maker (sell order) was partially filled + assert!(match_results.length() == 1); + let maker_match = match_results[0]; + let (order, matched_size) = maker_match.destroy_single_order_match(); + assert!(order.get_order_id() == new_order_id_type(@0xAA, 1)); + assert!(matched_size == 400); + assert!(order.get_orig_size() == 1000); + assert!(order.get_remaining_size() == 600); // Partial fill + + // Place another buy order for 300 units + let match_results = + order_book.place_order_and_get_matches( + OrderRequest { + account: @0xBB, + account_order_id: 2, 
+ unique_priority_idx: option::none(), + price: 100, + orig_size: 300, + remaining_size: 300, + is_buy: true, + trigger_condition: option::none(), + metadata: TestMetadata {} + } + ); + assert!(match_results.length() == 1); // Should match with the sell order + + // Verify second taker was fully filled + assert!(total_matched_size(&match_results) == 300); + + // Verify original maker was partially filled again + assert!(match_results.length() == 1); + let maker_match = match_results[0]; + let (order, matched_size) = maker_match.destroy_single_order_match(); + assert!(order.get_order_id() == new_order_id_type(@0xAA, 1)); + assert!(matched_size == 300); + assert!(order.get_orig_size() == 1000); + assert!(order.get_remaining_size() == 300); // Still partial as 300 units remain + + // Original sell order should still exist with 300 units remaining + let order_id = new_order_id_type(@0xAA, 1); + let order_state = *order_book.orders.borrow(&order_id); + let (order, is_active) = order_state.destroy_order_from_state(); + let (_order_id, _unique_priority_idx, price, orig_size, size, is_buy, _, _) = + order.destroy_order(); + assert!(is_active == true); + assert!(price == 100); + assert!(orig_size == 1000); + assert!(size == 300); // 1000 - 400 - 300 = 300 remaining + assert!(is_buy == false); + + order_book.destroy_order_book(); + } + + #[test] + fun test_good_til_cancelled_taker_partial_fill() { + let order_book = new_order_book(); + + // Place a GTC sell order for 500 units at price 100 + let match_result = + order_book.place_order_and_get_matches( + OrderRequest { + account: @0xAA, + account_order_id: 1, + unique_priority_idx: option::none(), + price: 100, + orig_size: 500, + remaining_size: 500, + is_buy: false, + trigger_condition: option::none(), + metadata: TestMetadata {} + } + ); + assert!(match_result.is_empty()); // No matches for first order + + // Place a larger buy order (800 units) at the same price + // Should partially fill against the sell order and remain in book + let match_results = + order_book.place_order_and_get_matches( + OrderRequest { + account: @0xBB, + account_order_id: 1, + unique_priority_idx: option::none(), + price: 100, + orig_size: 800, + remaining_size: 800, + is_buy: true, + trigger_condition: option::none(), + metadata: TestMetadata {} + } + ); + + // Verify taker (buy order) was partially filled + assert!(total_matched_size(&match_results) == 500); + + // Verify maker (sell order) was fully filled + assert!(match_results.length() == 1); + let maker_match = match_results[0]; + let (order, matched_size) = maker_match.destroy_single_order_match(); + assert!(order.get_order_id() == new_order_id_type(@0xAA, 1)); + assert!(matched_size == 500); + assert!(order.get_orig_size() == 500); + assert!(order.get_remaining_size() == 0); // Fully filled + + // Verify original sell order no longer exists (fully filled) + let order_id = new_order_id_type(@0xAA, 1); + assert!(!order_book.orders.contains(&order_id)); + + // Verify buy order still exists with remaining size + let order_id = new_order_id_type(@0xBB, 1); + let order_state = *order_book.orders.borrow(&order_id); + let (order, is_active) = order_state.destroy_order_from_state(); + let (_order_id, _unique_priority_idx, price, orig_size, size, is_buy, _, _) = + order.destroy_order(); + assert!(is_active == true); + assert!(price == 100); + assert!(orig_size == 800); + assert!(size == 300); // 800 - 500 = 300 remaining + assert!(is_buy == true); + + order_book.destroy_order_book(); + } + + #[test] + fun test_TP_order() { 
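+ // Take-profit flow exercised below: the buy orders carry TP trigger conditions, so they are parked in the + // price-move-down index and only match once trigger_pending_orders() is called with an oracle price at or below the trigger.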
+ let order_book = new_order_book(); + + // Place a GTC sell order for 1000 units at price 100 + let match_result = + order_book.place_order_and_get_matches( + OrderRequest { + account: @0xAA, + account_order_id: 1, + unique_priority_idx: option::none(), + price: 100, + orig_size: 1000, + remaining_size: 1000, + is_buy: false, + trigger_condition: option::none(), + metadata: TestMetadata {} + } + ); + assert!(match_result.is_empty()); // No matches for first order + + assert!(order_book.trigger_pending_orders(100).is_empty()); + + // Place a smaller buy order (400 units) at the same price + let match_result = + order_book.place_order_and_get_matches( + OrderRequest { + account: @0xBB, + account_order_id: 1, + unique_priority_idx: option::none(), + price: 100, + orig_size: 400, + remaining_size: 400, + is_buy: true, + trigger_condition: option::some(tp_trigger_condition(90)), + metadata: TestMetadata {} + } + ); + // Even if the price of 100 can be matched in the order book the trigger condition 90 should not trigger + // the matching + assert!(match_result.is_empty()); + assert!( + order_book.pending_orders.get_price_move_down_index().keys().length() == 1 + ); + + // Trigger the pending orders with a price of 90 + let match_results = order_book.trigger_pending_orders(90); + + // Verify taker (buy order) was fully filled + assert!(total_matched_size(&match_results) == 400); + + // Verify maker (sell order) was partially filled + assert!(match_results.length() == 1); + let maker_match = match_results[0]; + let (order, matched_size) = maker_match.destroy_single_order_match(); + assert!(order.get_order_id() == new_order_id_type(@0xAA, 1)); + assert!(matched_size == 400); + assert!(order.get_orig_size() == 1000); + assert!(order.get_remaining_size() == 600); // Partial fill + + // Place another buy order for 300 units + let match_result = + order_book.place_order_and_get_matches( + OrderRequest { + account: @0xBB, + account_order_id: 2, + unique_priority_idx: option::none(), + price: 100, + orig_size: 300, + remaining_size: 300, + is_buy: true, + trigger_condition: option::some(tp_trigger_condition(80)), + metadata: TestMetadata {} + } + ); + + assert!(match_result.is_empty()); + assert!( + order_book.pending_orders.get_price_move_down_index().keys().length() == 1 + ); + + // Oracle price moves up to 95, this should not trigger any order + let match_results = order_book.trigger_pending_orders(95); + assert!(match_results.length() == 0); + + // Move the oracle price down to 80, this should trigger the order + let match_results = order_book.trigger_pending_orders(80); + // Verify second taker was fully filled + assert!(total_matched_size(&match_results) == 300); + + // Verify original maker was partially filled again + assert!(match_results.length() == 1); + let maker_match = match_results[0]; + let (order, matched_size) = maker_match.destroy_single_order_match(); + assert!(order.get_order_id() == new_order_id_type(@0xAA, 1)); + assert!(matched_size == 300); + assert!(order.get_orig_size() == 1000); + assert!(order.get_remaining_size() == 300); // Still partial as 300 units remain + + // Original sell order should still exist with 300 units remaining + let order_id = new_order_id_type(@0xAA, 1); + let order_state = *order_book.orders.borrow(&order_id); + let (order, is_active) = order_state.destroy_order_from_state(); + let (_order_id, _unique_priority_idx, price, orig_size, size, is_buy, _, _) = + order.destroy_order(); + assert!(is_active == true); + assert!(price == 100); + assert!(orig_size 
== 1000); + assert!(size == 300); // 1000 - 400 - 300 = 300 remaining + assert!(is_buy == false); + + order_book.destroy_order_book(); + } + + #[test] + fun test_SL_order() { + let order_book = new_order_book(); + + // Place a GTC buy order for 1000 units at price 100 + let match_result = + order_book.place_order_and_get_matches( + OrderRequest { + account: @0xAA, + account_order_id: 1, + unique_priority_idx: option::none(), + price: 100, + orig_size: 1000, + remaining_size: 1000, + is_buy: true, + trigger_condition: option::none(), + metadata: TestMetadata {} + } + ); + assert!(match_result.is_empty()); // No matches for first order + + assert!(order_book.trigger_pending_orders(100).is_empty()); + + // Place a smaller sell order (400 units) at the same price + let match_result = + order_book.place_order_and_get_matches( + OrderRequest { + account: @0xBB, + account_order_id: 1, + unique_priority_idx: option::none(), + price: 100, + orig_size: 400, + remaining_size: 400, + is_buy: false, + trigger_condition: option::some(tp_trigger_condition(110)), + metadata: TestMetadata {} + } + ); + // Even though the price of 100 could be matched in the order book, the trigger condition of 110 should not trigger + // the matching + assert!(match_result.is_empty()); + assert!( + order_book.pending_orders.get_price_move_up_index().keys().length() == 1 + ); + + // Trigger the pending orders with a price of 110 + let match_results = order_book.trigger_pending_orders(110); + assert!(match_results.length() == 1); + + // Verify taker (sell order) was fully filled + assert!(total_matched_size(&match_results) == 400); + + // Verify maker (buy order) was partially filled + assert!(match_results.length() == 1); + let maker_match = match_results[0]; + let (order, matched_size) = maker_match.destroy_single_order_match(); + assert!(order.get_order_id() == new_order_id_type(@0xAA, 1)); + assert!(matched_size == 400); + assert!(order.get_orig_size() == 1000); + assert!(order.get_remaining_size() == 600); // Partial fill + + // Place another sell order for 300 units + let match_result = + order_book.place_order_and_get_matches( + OrderRequest { + account: @0xBB, + account_order_id: 2, + unique_priority_idx: option::none(), + price: 100, + orig_size: 300, + remaining_size: 300, + is_buy: false, + trigger_condition: option::some(tp_trigger_condition(120)), + metadata: TestMetadata {} + } + ); + + assert!(match_result.is_empty()); + assert!( + order_book.pending_orders.get_price_move_up_index().keys().length() == 1 + ); + + // Oracle price moves down to 100, this should not trigger any order + let match_results = order_book.trigger_pending_orders(100); + assert!(match_results.is_empty()); + + // Move the oracle price up to 120, this should trigger the order + let match_results = order_book.trigger_pending_orders(120); + + // Verify second taker was fully filled + assert!(total_matched_size(&match_results) == 300); + + // Verify original maker was partially filled again + assert!(match_results.length() == 1); + let maker_match = match_results[0]; + let (order, matched_size) = maker_match.destroy_single_order_match(); + assert!(order.get_order_id() == new_order_id_type(@0xAA, 1)); + assert!(matched_size == 300); + assert!(order.get_orig_size() == 1000); + assert!(order.get_remaining_size() == 300); // Still partial as 300 units remain + + // Original buy order should still exist with 300 units remaining + let order_id = new_order_id_type(@0xAA, 1); + let order_state = *order_book.orders.borrow(&order_id); + let (order, is_active) = 
order_state.destroy_order_from_state(); + let (_order_id, _unique_priority_idx, price, orig_size, size, is_buy, _, _) = + order.destroy_order(); + assert!(is_active == true); + assert!(price == 100); + assert!(orig_size == 1000); + assert!(size == 300); // 1000 - 400 - 300 = 300 remaining + assert!(is_buy == true); + order_book.destroy_order_book(); + } + + #[test] + fun test_maker_order_reinsert_already_exists() { + let order_book = new_order_book(); + + // Place a GTC sell order + let order_req = OrderRequest { + account: @0xAA, + account_order_id: 1, + unique_priority_idx: option::none(), + price: 100, + orig_size: 1000, + remaining_size: 1000, + is_buy: false, + trigger_condition: option::none(), + metadata: TestMetadata {} + }; + order_book.place_maker_order(order_req); + assert!(order_book.get_remaining_size(@0xAA, 1) == 1000); + + // Taker order + let order_req = OrderRequest { + account: @0xBB, + account_order_id: 1, + unique_priority_idx: option::none(), + price: 100, + orig_size: 100, + remaining_size: 100, + is_buy: true, + trigger_condition: option::none(), + metadata: TestMetadata {} + }; + + let match_results = order_book.place_order_and_get_matches(order_req); + assert!(total_matched_size(&match_results) == 100); + + let (matched_order, _) = match_results[0].destroy_single_order_match(); + let ( + _order_id, + unique_idx, + price, + orig_size, + _remaining_size, + is_buy, + _trigger_condition, + metadata + ) = matched_order.destroy_order(); + // Assume only half of the matched size (50 out of 100) actually settled, so the unsettled 50 is reinserted back to the order book + let order_req = OrderRequest { + account: @0xAA, + account_order_id: 1, + unique_priority_idx: option::some(unique_idx), + price, + orig_size, + remaining_size: 50, + is_buy, + trigger_condition: option::none(), + metadata + }; + order_book.reinsert_maker_order(order_req); + // Verify order was reinserted with updated size + assert!(order_book.get_remaining_size(@0xAA, 1) == 950); + order_book.destroy_order_book(); + } + + #[test] + fun test_maker_order_reinsert_not_exists() { + let order_book = new_order_book(); + + // Place a GTC sell order + let order_req = OrderRequest { + account: @0xAA, + account_order_id: 1, + unique_priority_idx: option::none(), + price: 100, + orig_size: 1000, + remaining_size: 1000, + is_buy: false, + trigger_condition: option::none(), + metadata: TestMetadata {} + }; + order_book.place_maker_order(order_req); + assert!(order_book.get_remaining_size(@0xAA, 1) == 1000); + + // Taker order + let order_req = OrderRequest { + account: @0xBB, + account_order_id: 1, + unique_priority_idx: option::none(), + price: 100, + orig_size: 1000, + remaining_size: 1000, + is_buy: true, + trigger_condition: option::none(), + metadata: TestMetadata {} + }; + + let match_results = order_book.place_order_and_get_matches(order_req); + assert!(total_matched_size(&match_results) == 1000); + + let (matched_order, _) = match_results[0].destroy_single_order_match(); + let ( + _order_id, + unique_idx, + price, + orig_size, + _remaining_size, + is_buy, + _trigger_condition, + metadata + ) = matched_order.destroy_order(); + // Assume only half of the matched size (500 out of 1000) actually settled, so the unsettled 500 is reinserted back to the order book + let order_req = OrderRequest { + account: @0xAA, + account_order_id: 1, + unique_priority_idx: option::some(unique_idx), + price, + orig_size, + remaining_size: 500, + is_buy, + trigger_condition: option::none(), + metadata + }; + order_book.reinsert_maker_order(order_req); + // Verify order was reinserted with updated 
size + assert!(order_book.get_remaining_size(@0xAA, 1) == 500); + order_book.destroy_order_book(); + } + + #[test] + fun test_decrease_order_size() { + let order_book = new_order_book(); + + // Place an active order + let order_req = OrderRequest { + account: @0xAA, + account_order_id: 1, + unique_priority_idx: option::none(), + price: 100, + orig_size: 1000, + remaining_size: 1000, + is_buy: false, + trigger_condition: option::none(), + metadata: TestMetadata {} + }; + order_book.place_maker_order(order_req); + assert!(order_book.get_remaining_size(@0xAA, 1) == 1000); + + order_book.decrease_order_size(@0xAA, 1, 700); + // Verify order was decreased with updated size + assert!(order_book.get_remaining_size(@0xAA, 1) == 300); + + let order_req = OrderRequest { + account: @0xBB, + account_order_id: 1, + unique_priority_idx: option::none(), + price: 100, + orig_size: 1000, + remaining_size: 1000, + is_buy: false, + trigger_condition: option::some(tp_trigger_condition(90)), + metadata: TestMetadata {} + }; + order_book.place_maker_order(order_req); + assert!(order_book.get_remaining_size(@0xBB, 1) == 1000); + order_book.decrease_order_size(@0xBB, 1, 600); + // Verify order was decreased with updated size + assert!(order_book.get_remaining_size(@0xBB, 1) == 400); + + order_book.destroy_order_book(); + } +} diff --git a/aptos-move/framework/aptos-experimental/sources/trading/order_book/order_book_types.move b/aptos-move/framework/aptos-experimental/sources/trading/order_book/order_book_types.move new file mode 100644 index 0000000000000..d3cccedfac7aa --- /dev/null +++ b/aptos-move/framework/aptos-experimental/sources/trading/order_book/order_book_types.move @@ -0,0 +1,310 @@ +/// (work in progress) +module aptos_experimental::order_book_types { + use std::option; + use std::option::Option; + use aptos_std::bcs; + use aptos_std::from_bcs; + use aptos_framework::transaction_context; + use aptos_framework::big_ordered_map::{Self, BigOrderedMap}; + friend aptos_experimental::active_order_book; + friend aptos_experimental::order_book; + friend aptos_experimental::pending_order_book_index; + + const U256_MAX: u256 = + 0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff; + + const BIG_MAP_INNER_DEGREE: u16 = 64; + const BIG_MAP_LEAF_DEGREE: u16 = 32; + + const EORDER_ALREADY_EXISTS: u64 = 1; + const EINVALID_TRIGGER_CONDITION: u64 = 2; + const INVALID_MATCH_RESULT: u64 = 3; + const EINVALID_ORDER_SIZE_DECREASE: u64 = 4; + + const SLIPPAGE_PCT_PRECISION: u64 = 100; // 100 = 1% + + // to replace types: + struct OrderIdType has store, copy, drop { + account: address, + account_order_id: u64 + } + + struct UniqueIdxType has store, copy, drop { + idx: u256 + } + + struct ActiveMatchedOrder has copy, drop { + order_id: OrderIdType, + matched_size: u64, + /// Remaining size of the maker order + remaining_size: u64 + } + + struct SingleOrderMatch has drop, copy { + order: Order, + matched_size: u64 + } + + struct Order has store, copy, drop { + order_id: OrderIdType, + unique_priority_idx: UniqueIdxType, + price: u64, + orig_size: u64, + remaining_size: u64, + is_bid: bool, + trigger_condition: Option, + metadata: M + } + + enum TriggerCondition has store, drop, copy { + TakeProfit(u64), + StopLoss(u64), + TimeBased(u64) + } + + struct OrderWithState has store, drop, copy { + order: Order, + is_active: bool // i.e. where to find it. 
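+ // true: the order currently sits in the active order book; false: it sits in the pending (trigger-condition) index.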
+ } + + public(friend) fun new_default_big_ordered_map(): BigOrderedMap { + big_ordered_map::new_with_config( + BIG_MAP_INNER_DEGREE, + BIG_MAP_LEAF_DEGREE, + true + ) + } + + public fun get_slippage_pct_precision(): u64 { + SLIPPAGE_PCT_PRECISION + } + + public fun new_time_based_trigger_condition(time: u64): TriggerCondition { + TriggerCondition::TimeBased(time) + } + + public fun new_order_id_type(account: address, account_order_id: u64): OrderIdType { + OrderIdType { account, account_order_id } + } + + public fun generate_unique_idx_fifo_tiebraker(): UniqueIdxType { + // TODO change from random to monothonically increasing value + new_unique_idx_type( + from_bcs::to_u256( + bcs::to_bytes(&transaction_context::generate_auid_address()) + ) + ) + } + + public fun new_unique_idx_type(idx: u256): UniqueIdxType { + UniqueIdxType { idx } + } + + public fun descending_idx(self: &UniqueIdxType): UniqueIdxType { + UniqueIdxType { idx: U256_MAX - self.idx } + } + + public fun new_active_matched_order( + order_id: OrderIdType, matched_size: u64, remaining_size: u64 + ): ActiveMatchedOrder { + ActiveMatchedOrder { order_id, matched_size, remaining_size } + } + + public fun destroy_active_matched_order(self: ActiveMatchedOrder): (OrderIdType, u64, u64) { + (self.order_id, self.matched_size, self.remaining_size) + } + + public fun new_order( + order_id: OrderIdType, + unique_priority_idx: UniqueIdxType, + price: u64, + orig_size: u64, + size: u64, + is_buy: bool, + trigger_condition: Option, + metadata: M + ): Order { + Order { + order_id, + unique_priority_idx, + price, + orig_size, + remaining_size: size, + is_bid: is_buy, + trigger_condition, + metadata + } + } + + public fun new_single_order_match( + order: Order, matched_size: u64 + ): SingleOrderMatch { + SingleOrderMatch { order, matched_size } + } + + public fun get_active_matched_size(self: &ActiveMatchedOrder): u64 { + self.matched_size + } + + public fun get_matched_size( + self: &SingleOrderMatch + ): u64 { + self.matched_size + } + + public fun new_order_with_state( + order: Order, is_active: bool + ): OrderWithState { + OrderWithState { order, is_active } + } + + public fun tp_trigger_condition(take_profit: u64): TriggerCondition { + TriggerCondition::TakeProfit(take_profit) + } + + public fun sl_trigger_condition(stop_loss: u64): TriggerCondition { + TriggerCondition::StopLoss(stop_loss) + } + + // Returns the price move down index and price move up index for a particular trigger condition + public fun index(self: &TriggerCondition, is_buy: bool): + (Option, Option, Option) { + match(self) { + TriggerCondition::TakeProfit(tp) => { + if (is_buy) { + (option::some(*tp), option::none(), option::none()) + } else { + (option::none(), option::some(*tp), option::none()) + } + } + TriggerCondition::StopLoss(sl) => { + if (is_buy) { + (option::none(), option::some(*sl), option::none()) + } else { + (option::some(*sl), option::none(), option::none()) + } + } + TriggerCondition::TimeBased(time) => { + (option::none(), option::none(), option::some(*time)) + } + } + } + + public fun get_order_from_state( + self: &OrderWithState + ): &Order { + &self.order + } + + public fun get_metadata_from_state( + self: &OrderWithState + ): M { + self.order.metadata + } + + public fun get_order_id(self: &Order): OrderIdType { + self.order_id + } + + public fun get_unique_priority_idx(self: &Order): UniqueIdxType { + self.unique_priority_idx + } + + public fun get_metadata_from_order(self: &Order): M { + self.metadata + } + + public fun 
get_trigger_condition_from_order( + self: &Order + ): Option { + self.trigger_condition + } + + public fun increase_remaining_size( + self: &mut OrderWithState, size: u64 + ) { + self.order.remaining_size += size; + } + + public fun decrease_remaining_size( + self: &mut OrderWithState, size: u64 + ) { + assert!(self.order.remaining_size > size, EINVALID_ORDER_SIZE_DECREASE); + self.order.remaining_size -= size; + } + + public fun set_remaining_size( + self: &mut OrderWithState, remaining_size: u64 + ) { + self.order.remaining_size = remaining_size; + } + + public fun get_remaining_size_from_state( + self: &OrderWithState + ): u64 { + self.order.remaining_size + } + + public fun get_unique_priority_idx_from_state( + self: &OrderWithState + ): UniqueIdxType { + self.order.unique_priority_idx + } + + public fun get_remaining_size(self: &Order): u64 { + self.remaining_size + } + + public fun get_orig_size(self: &Order): u64 { + self.orig_size + } + + public fun destroy_order_from_state( + self: OrderWithState + ): (Order, bool) { + (self.order, self.is_active) + } + + public fun destroy_active_match_order(self: ActiveMatchedOrder): (OrderIdType, u64, u64) { + (self.order_id, self.matched_size, self.remaining_size) + } + + public fun destroy_order( + self: Order + ): (OrderIdType, UniqueIdxType, u64, u64, u64, bool, Option, M) { + ( + self.order_id, + self.unique_priority_idx, + self.price, + self.orig_size, + self.remaining_size, + self.is_bid, + self.trigger_condition, + self.metadata + ) + } + + public fun destroy_single_order_match( + self: SingleOrderMatch + ): (Order, u64) { + (self.order, self.matched_size) + } + + public fun destroy_order_id_type(self: OrderIdType): (address, u64) { + (self.account, self.account_order_id) + } + + public fun is_active_order( + self: &OrderWithState + ): bool { + self.is_active + } + + public fun get_price(self: &Order): u64 { + self.price + } + + public fun is_bid(self: &Order): bool { + self.is_bid + } +} diff --git a/aptos-move/framework/aptos-experimental/sources/trading/order_book/pending_order_book_index.move b/aptos-move/framework/aptos-experimental/sources/trading/order_book/pending_order_book_index.move new file mode 100644 index 0000000000000..abdda107a9cc6 --- /dev/null +++ b/aptos-move/framework/aptos-experimental/sources/trading/order_book/pending_order_book_index.move @@ -0,0 +1,181 @@ +/// (work in progress) +module aptos_experimental::pending_order_book_index { + use std::vector; + use aptos_framework::timestamp; + use aptos_framework::big_ordered_map::BigOrderedMap; + use aptos_experimental::order_book_types::{ + OrderIdType, + UniqueIdxType, + TriggerCondition, + new_default_big_ordered_map + }; + + friend aptos_experimental::order_book; + + struct PendingOrderKey has store, copy, drop { + price: u64, + tie_breaker: UniqueIdxType + } + + enum PendingOrderBookIndex has store { + V1 { + // Orders to trigger when the oracle price moves below the key price + price_move_down_index: BigOrderedMap, + // Orders to trigger when the oracle price moves above the key price + price_move_up_index: BigOrderedMap, + //time_based_index: BigOrderedMap, ActiveBidData>, + // Orders to trigger when the current time is greater than the key time + time_based_index: BigOrderedMap + } + } + + public(friend) fun new_pending_order_book_index(): PendingOrderBookIndex { + PendingOrderBookIndex::V1 { + price_move_up_index: new_default_big_ordered_map(), + price_move_down_index: new_default_big_ordered_map(), + time_based_index: new_default_big_ordered_map() + } + } + + + + public(friend) fun 
cancel_pending_order( + self: &mut PendingOrderBookIndex, + trigger_condition: TriggerCondition, + unique_priority_idx: UniqueIdxType, + is_buy: bool + ) { + let (price_move_up_index, price_move_down_index, time_based_index) = + trigger_condition.index(is_buy); + if (price_move_up_index.is_some()) { + self.price_move_up_index.remove( + &PendingOrderKey { + price: price_move_up_index.destroy_some(), + tie_breaker: unique_priority_idx + } + ); + }; + if (price_move_down_index.is_some()) { + self.price_move_down_index.remove( + &PendingOrderKey { + price: price_move_down_index.destroy_some(), + tie_breaker: unique_priority_idx + } + ); + }; + if (time_based_index.is_some()) { + self.time_based_index.remove(&time_based_index.destroy_some()); + }; + } + + public(friend) fun place_pending_maker_order( + self: &mut PendingOrderBookIndex, + order_id: OrderIdType, + trigger_condition: TriggerCondition, + unique_priority_idx: UniqueIdxType, + is_buy: bool + ) { + // Add this order to the pending order book index + let (price_move_down_index, price_move_up_index, time_based_index) = + trigger_condition.index(is_buy); + + if (price_move_up_index.is_some()) { + self.price_move_up_index.add( + PendingOrderKey { + price: price_move_up_index.destroy_some(), + tie_breaker: unique_priority_idx + }, + order_id + ); + } else if (price_move_down_index.is_some()) { + self.price_move_down_index.add( + PendingOrderKey { + price: price_move_down_index.destroy_some(), + tie_breaker: unique_priority_idx + }, + order_id + ); + } else if (time_based_index.is_some()) { + self.time_based_index.add(time_based_index.destroy_some(), order_id); + }; + } + + public fun take_ready_price_based_orders( + self: &mut PendingOrderBookIndex, current_price: u64 + ): vector { + let orders = vector::empty(); + while (!self.price_move_up_index.is_empty()) { + let (key, order_id) = self.price_move_up_index.borrow_front(); + if (current_price >= key.price) { + orders.push_back(*order_id); + self.price_move_up_index.remove(&key); + } else { + break; + } + }; + while (!self.price_move_down_index.is_empty()) { + let (key, order_id) = self.price_move_down_index.borrow_back(); + if (current_price <= key.price) { + orders.push_back(*order_id); + self.price_move_down_index.remove(&key); + } else { + break; + } + }; + orders + } + + public fun take_time_time_based_orders( + self: &mut PendingOrderBookIndex + ): vector { + let orders = vector::empty(); + while (!self.time_based_index.is_empty()) { + let current_time = timestamp::now_seconds(); + let (time, order_id) = self.time_based_index.borrow_front(); + if (current_time >= time) { + orders.push_back(*order_id); + self.time_based_index.remove(&time); + } else { + break; + } + }; + orders + } + + #[test_only] + public(friend) fun destroy_pending_order_book_index( + self: PendingOrderBookIndex + ) { + let PendingOrderBookIndex::V1 { + price_move_up_index, + price_move_down_index, + time_based_index + } = self; + price_move_up_index.destroy(|_v| {}); + price_move_down_index.destroy(|_v| {}); + time_based_index.destroy(|_v| {}); + } + + #[test_only] + public(friend) fun get_price_move_down_index( + self: &PendingOrderBookIndex + ): &BigOrderedMap { + &self.price_move_down_index + } + + #[test_only] + public(friend) fun get_price_move_up_index( + self: &PendingOrderBookIndex + ): &BigOrderedMap { + &self.price_move_up_index + } + + #[test_only] + public(friend) fun get_time_based_index( + self: &PendingOrderBookIndex + ): &BigOrderedMap { + &self.time_based_index + } + + +} diff --git 
a/aptos-move/framework/aptos-experimental/sources/trading/tests/event_utils.move b/aptos-move/framework/aptos-experimental/sources/trading/tests/event_utils.move new file mode 100644 index 0000000000000..4499253a61e2a --- /dev/null +++ b/aptos-move/framework/aptos-experimental/sources/trading/tests/event_utils.move @@ -0,0 +1,28 @@ +#[test_only] +module aptos_experimental::event_utils { + use std::option::Option; + use aptos_framework::event; + struct EventStore has drop { + last_index: u64 + } + + public fun new_event_store(): EventStore { + EventStore { last_index: 0 } + } + + public fun latest_emitted_events( + store: &mut EventStore, limit: Option + ): vector { + let events = event::emitted_events(); + let end_index = + if (limit.is_none()) { + events.length() + } else { + let limit = limit.destroy_some(); + store.last_index + limit + }; + let latest_events = events.slice(store.last_index, end_index); + store.last_index = end_index; + latest_events + } +} diff --git a/aptos-move/framework/aptos-experimental/sources/trading/tests/market/clearinghouse_test.move b/aptos-move/framework/aptos-experimental/sources/trading/tests/market/clearinghouse_test.move new file mode 100644 index 0000000000000..1065968a948f7 --- /dev/null +++ b/aptos-move/framework/aptos-experimental/sources/trading/tests/market/clearinghouse_test.move @@ -0,0 +1,183 @@ +#[test_only] +module aptos_experimental::clearinghouse_test { + use std::error; + use std::option; + use std::signer; + use aptos_std::table; + use aptos_std::table::Table; + use aptos_experimental::market_types::{ + SettleTradeResult, + new_settle_trade_result, + MarketClearinghouseCallbacks, + new_market_clearinghouse_callbacks + }; + + const EINVALID_ADDRESS: u64 = 1; + const E_DUPLICATE_ORDER: u64 = 2; + const E_ORDER_NOT_FOUND: u64 = 3; + const E_ORDER_NOT_CLEANED_UP: u64 = 4; + + struct TestOrderMetadata has store, copy, drop {} + + public fun new_test_order_metadata(): TestOrderMetadata { + TestOrderMetadata {} + } + + struct Position has store, drop { + size: u64, + is_long: bool + } + + struct GlobalState has key { + user_positions: Table, + open_orders: Table, + maker_order_calls: Table + } + + public(package) fun initialize(admin: &signer) { + assert!( + signer::address_of(admin) == @0x1, + error::invalid_argument(EINVALID_ADDRESS) + ); + move_to(admin, GlobalState { + user_positions: table::new(), + open_orders: table::new(), + maker_order_calls: table::new() + }); + } + + public(package) fun validate_order_placement(order_id: u64): bool acquires GlobalState { + let open_orders = &mut borrow_global_mut(@0x1).open_orders; + assert!(!open_orders.contains(order_id), error::invalid_argument(E_DUPLICATE_ORDER)); + open_orders.add(order_id, true); + return true + } + + public(package) fun get_position_size(user: address): u64 acquires GlobalState { + let user_positions = &borrow_global(@0x1).user_positions; + if (!user_positions.contains(user)) { + return 0; + }; + user_positions.borrow(user).size + } + + fun update_position( + position: &mut Position, size: u64, is_bid: bool + ) { + if (position.is_long != is_bid) { + if (size > position.size) { + position.size = size - position.size; + position.is_long = is_bid; + } else { + position.size -= size; + } + } else { + position.size += size; + } + } + + public(package) fun settle_trade( + taker: address, + maker: address, + size: u64, + is_taker_long: bool + ): SettleTradeResult acquires GlobalState { + let user_positions = &mut borrow_global_mut(@0x1).user_positions; + let taker_position = + 
user_positions.borrow_mut_with_default( + taker, Position { size: 0, is_long: true } + ); + update_position(taker_position, size, is_taker_long); + let maker_position = + user_positions.borrow_mut_with_default( + maker, Position { size: 0, is_long: true } + ); + update_position(maker_position, size, !is_taker_long); + new_settle_trade_result(size, option::none(), option::none()) + } + + public(package) fun place_maker_order( + order_id: u64, + ) acquires GlobalState { + let maker_order_calls = &mut borrow_global_mut(@0x1).maker_order_calls; + assert!(!maker_order_calls.contains(order_id), error::invalid_argument(E_DUPLICATE_ORDER)); + maker_order_calls.add(order_id, true); + } + + public(package) fun is_maker_order_called( + order_id: u64 + ): bool acquires GlobalState { + let maker_order_calls = &borrow_global(@0x1).maker_order_calls; + maker_order_calls.contains(order_id) + } + + public(package) fun cleanup_order( + order_id: u64, + ) acquires GlobalState { + let open_orders = &mut borrow_global_mut(@0x1).open_orders; + assert!(open_orders.contains(order_id), error::invalid_argument(E_ORDER_NOT_FOUND)); + open_orders.remove(order_id); + } + + public(package) fun order_exists( + order_id: u64 + ): bool acquires GlobalState { + let open_orders = &borrow_global(@0x1).open_orders; + open_orders.contains(order_id) + } + + public(package) fun settle_trade_with_taker_cancelled( + _taker: address, + _maker: address, + size: u64, + _is_taker_long: bool + ): SettleTradeResult { + new_settle_trade_result( + size / 2, + option::none(), + option::some(std::string::utf8(b"Max open interest violation")) + ) + } + + public(package) fun test_market_callbacks(): + MarketClearinghouseCallbacks acquires GlobalState { + new_market_clearinghouse_callbacks( + |taker, maker, _taker_order_id, _maker_order_id, _fill_id, is_taker_long, _price, size, _taker_metadata, _maker_metadata| { + settle_trade(taker, maker, size, is_taker_long) + }, + | _account, order_id, _is_taker, _is_bid, _price, _size, _order_metadata| { + validate_order_placement(order_id) + }, + |_account, order_id, _is_bid, _price, _size, _order_metadata| { + place_maker_order(order_id); + }, + | _account, _order_id, _is_bid, _remaining_size| { + cleanup_order(_order_id); + }, + | _account, _order_id, _is_bid, _price, _size| { + // decrease order size is not used in this test + }, + ) + } + + public(package) fun test_market_callbacks_with_taker_cancelled(): + MarketClearinghouseCallbacks acquires GlobalState { + new_market_clearinghouse_callbacks( + |taker, maker, _taker_order_id, _maker_order_id, _fill_id, is_taker_long, _price, size, _taker_metadata, _maker_metadata| { + settle_trade_with_taker_cancelled(taker, maker, size, is_taker_long) + }, + | _account, order_id, _is_taker, _is_bid, _price, _size, _order_metadata| { + validate_order_placement(order_id) + }, + |_account, _order_id, _is_bid, _price, _size, _order_metadata| { + // place_maker_order is not used in this test + }, + | _account, _order_id, _is_bid, _remaining_size| { + cleanup_order(_order_id); + }, + | _account, _order_id, _is_bid, _price, _size| { + // decrease order size is not used in this test + }, + ) + } +} diff --git a/aptos-move/framework/aptos-experimental/sources/trading/tests/market/market_test_utils.move b/aptos-move/framework/aptos-experimental/sources/trading/tests/market/market_test_utils.move new file mode 100644 index 0000000000000..4cbc51af82328 --- /dev/null +++ b/aptos-move/framework/aptos-experimental/sources/trading/tests/market/market_test_utils.move @@ -0,0 
+1,326 @@ +#[test_only] +module aptos_experimental::market_test_utils { + use std::option; + use std::option::Option; + use std::signer; + use aptos_experimental::clearinghouse_test; + use aptos_experimental::event_utils::{latest_emitted_events, EventStore}; + use aptos_experimental::market_types::MarketClearinghouseCallbacks; + + use aptos_experimental::market::{ + order_status_cancelled, + order_status_filled, + order_status_open, + OrderEvent, + Market + }; + + public fun place_maker_order_and_verify( + market: &mut Market, + user: &signer, + price: u64, + size: u64, + is_buy: bool, + time_in_force: u8, + event_store: &mut EventStore, + is_taker: bool, + is_cancelled: bool, + metadata: M, + callbacks: &MarketClearinghouseCallbacks + ): u64 { + let user_addr = signer::address_of(user); + market.place_order( + user, + price, + size, + is_buy, // is_buy + time_in_force, // order_type + option::none(), // trigger_condition + metadata, + 1000, + true, + callbacks + ); + let events = latest_emitted_events(event_store, option::none()); + if (!is_cancelled) { + assert!(events.length() == 1); + } else { + assert!(events.length() == 2); + }; + let order_place_event = events[0]; + let order_id = order_place_event.get_order_id_from_event(); + order_place_event.verify_order_event( + order_id, + market.get_market(), + user_addr, + size, + size, + size, + price, + is_buy, + is_taker, + order_status_open() + ); + if (!is_cancelled) { + // Maker order is opened + assert!(clearinghouse_test::is_maker_order_called(order_id)); + } else { + // Maker order is cancelled + assert!(!clearinghouse_test::is_maker_order_called(order_id)); + }; + if (is_cancelled) { + let order_cancel_event = events[1]; + order_cancel_event.verify_order_event( + order_id, + market.get_market(), + user_addr, + size, + 0, // Remaining size is always 0 when the order is cancelled + size, + price, + is_buy, + is_taker, + order_status_cancelled() + ) + }; + order_id + } + + public fun place_taker_order( + market: &mut Market, + taker: &signer, + taker_price: u64, + size: u64, + is_buy: bool, + time_in_force: u8, + event_store: &mut EventStore, + max_fills: Option, + metadata: M, + callbacks: &MarketClearinghouseCallbacks + ): u64 { + let taker_addr = signer::address_of(taker); + let max_fills = + if (max_fills.is_none()) { 1000 } + else { + max_fills.destroy_some() + }; + // Taker order will be immediately match in the same transaction + market.place_order( + taker, + taker_price, + size, + is_buy, // is_buy + time_in_force, // order_type + option::none(), // trigger_condition + metadata, + max_fills, + true, + callbacks + ); + + let events = latest_emitted_events(event_store, option::some(1)); + let order_place_event = events[0]; + let order_id = order_place_event.get_order_id_from_event(); + // Taker order is opened + order_place_event.verify_order_event( + order_id, + market.get_market(), + taker_addr, + size, + size, + size, + taker_price, + is_buy, + true, + order_status_open() + ); + order_id + } + + public fun place_taker_order_and_verify_fill( + market: &mut Market, + taker: &signer, + taker_price: u64, + size: u64, + is_buy: bool, + time_in_force: u8, + fill_sizes: vector, + fill_prices: vector, + maker_addr: address, + maker_order_ids: vector, + maker_orig_sizes: vector, + maker_remaining_sizes: vector, + event_store: &mut EventStore, + is_cancelled: bool, + max_fills: Option, + metadata: M, + callbacks: &MarketClearinghouseCallbacks + ): u64 { + let order_id = + place_taker_order( + market, + taker, + taker_price, + size, + 
is_buy, + time_in_force, + event_store, + max_fills, + metadata, + callbacks + ); + + verify_fills( + market, + taker, + order_id, // taker_order_id + taker_price, + size, + is_buy, + fill_sizes, + fill_prices, + maker_addr, + maker_order_ids, + maker_orig_sizes, + maker_remaining_sizes, + event_store, + is_cancelled + ); + + order_id + } + + public fun verify_cancel_event( + market: &mut Market, + user: &signer, + is_taker: bool, + order_id: u64, + price: u64, + orig_size: u64, + remaining_size: u64, + size_delta: u64, + is_buy: bool, + event_store: &mut EventStore + ) { + let user_addr = signer::address_of(user); + let events = latest_emitted_events(event_store, option::some(1)); + assert!(events.length() == 1); + let order_cancel_event = events[0]; + order_cancel_event.verify_order_event( + order_id, + market.get_market(), + user_addr, + orig_size, + remaining_size, + size_delta, + price, // price + is_buy, + is_taker, + order_status_cancelled() + ); + } + + public fun verify_fills( + market: &mut Market, + taker: &signer, + taker_order_id: u64, + taker_price: u64, + size: u64, + is_buy: bool, + fill_sizes: vector, + fill_prices: vector, + maker_addr: address, + maker_order_ids: vector, + maker_orig_sizes: vector, + maker_remaining_sizes: vector, + event_store: &mut EventStore, + is_cancelled: bool + ) { + let taker_addr = signer::address_of(taker); + let total_fill_size = fill_sizes.fold(0, |acc, fill_size| acc + fill_size); + let events = latest_emitted_events(event_store, option::none()); + assert!(fill_sizes.length() == maker_order_ids.length()); + assert!(fill_prices.length() == fill_sizes.length()); + assert!(maker_orig_sizes.length() == fill_sizes.length()); + assert!(size >= total_fill_size); + let is_partial_fill = size > total_fill_size; + let num_expected_events = 2 * fill_sizes.length(); + if (is_cancelled || is_partial_fill) { + // Cancelling (from IOC) will add an extra cancel event + // Partial fill will add an extra open event + num_expected_events += 1; + }; + assert!(events.length() == num_expected_events); + + let fill_index = 0; + let taker_total_fill = 0; + while (fill_index < fill_sizes.length()) { + let fill_size = fill_sizes[fill_index]; + let fill_price = fill_prices[fill_index]; + let maker_orig_size = maker_orig_sizes[fill_index]; + let maker_remaining_size = maker_remaining_sizes[fill_index]; + taker_total_fill += fill_size; + let maker_order_id = maker_order_ids[fill_index]; + // Taker order is filled + let taker_order_fill_event = events[2 * fill_index]; + taker_order_fill_event.verify_order_event( + taker_order_id, + market.get_market(), + taker_addr, + size, + size - taker_total_fill, + fill_size, + fill_price, + is_buy, + true, + order_status_filled() + ); + // Maker order is filled + let maker_order_fill_event = events[1 + 2 * fill_index]; + maker_order_fill_event.verify_order_event( + maker_order_id, + market.get_market(), + maker_addr, + maker_orig_size, + maker_remaining_size - fill_size, + fill_size, + fill_price, + !is_buy, + false, + order_status_filled() + ); + fill_index += 1; + }; + if (is_cancelled) { + // Taker order is cancelled + let order_cancel_event = events[num_expected_events - 1]; + order_cancel_event.verify_order_event( + taker_order_id, + market.get_market(), + taker_addr, + size, + 0, // Remaining size is always 0 when the order is cancelled + size - taker_total_fill, + taker_price, + is_buy, + true, + order_status_cancelled() + ) + } else if (is_partial_fill) { + // Maker order is opened + let order_open_event = 
events[num_expected_events - 1]; + order_open_event.verify_order_event( + taker_order_id, + market.get_market(), + taker_addr, + size, + size - total_fill_size, + size, + taker_price, + is_buy, + false, + order_status_open() + ) + }; + } +} diff --git a/aptos-move/framework/aptos-experimental/sources/trading/tests/market/market_tests.move b/aptos-move/framework/aptos-experimental/sources/trading/tests/market/market_tests.move new file mode 100644 index 0000000000000..8e32b2907abcc --- /dev/null +++ b/aptos-move/framework/aptos-experimental/sources/trading/tests/market/market_tests.move @@ -0,0 +1,950 @@ +#[test_only] +module aptos_experimental::market_tests { + use std::option; + use std::signer; + use std::vector; + use aptos_experimental::clearinghouse_test; + use aptos_experimental::clearinghouse_test::{ + test_market_callbacks, + new_test_order_metadata, + get_position_size, + test_market_callbacks_with_taker_cancelled + }; + use aptos_experimental::market_test_utils::{ + place_maker_order_and_verify, + place_taker_order_and_verify_fill, + place_taker_order, + verify_cancel_event, + verify_fills + }; + use aptos_experimental::event_utils; + use aptos_experimental::market::{ + good_till_cancelled, + post_only, + immediate_or_cancel, + new_market, + new_market_config + }; + + #[test( + admin = @0x1, market_signer = @0x123, maker = @0x456, taker = @0x789 + )] + public fun test_gtc_taker_fully_filled( + admin: &signer, + market_signer: &signer, + maker: &signer, + taker: &signer + ) { + // Setup accounts + let market = new_market( + admin, + market_signer, + new_market_config(false, true) + ); + clearinghouse_test::initialize(admin); + let maker_addr = signer::address_of(maker); + let taker_addr = signer::address_of(taker); + + let event_store = event_utils::new_event_store(); + let maker_order_id = + place_maker_order_and_verify( + &mut market, + maker, + 1000, + 2000000, + true, + good_till_cancelled(), + &mut event_store, + false, + false, + new_test_order_metadata(), + &test_market_callbacks() + ); + + // Order not filled yet, so size is 0 + assert!(get_position_size(maker_addr) == 0); + assert!(get_position_size(taker_addr) == 0); + + let taker_order_id = + place_taker_order_and_verify_fill( + &mut market, + taker, + 1000, + 1000000, + false, + good_till_cancelled(), + vector[1000000], + vector[1000], + maker_addr, + vector[maker_order_id], + vector[2000000], + vector[2000000], + &mut event_store, + false, + option::none(), + new_test_order_metadata(), + &test_market_callbacks() + ); + assert!(get_position_size(maker_addr) == 1000000); + assert!(get_position_size(taker_addr) == 1000000); + assert!(clearinghouse_test::order_exists(maker_order_id)); + assert!(!clearinghouse_test::order_exists(taker_order_id)); + + let taker_order_id2 = + place_taker_order_and_verify_fill( + &mut market, + taker, + 1000, + 1000000, + false, + good_till_cancelled(), + vector[1000000], + vector[1000], + maker_addr, + vector[maker_order_id], + vector[2000000], + vector[1000000], + &mut event_store, + false, + option::none(), + new_test_order_metadata(), + &test_market_callbacks() + ); + + assert!(get_position_size(maker_addr) == 2000000); + assert!(get_position_size(taker_addr) == 2000000); + // Both orders should be filled and cleaned up + assert!(!clearinghouse_test::order_exists(maker_order_id)); + assert!(!clearinghouse_test::order_exists(taker_order_id2)); + market.destroy_market() + } + + #[test( + admin = @0x1, market_signer = @0x123, maker = @0x456, taker = @0x789 + )] + public fun 
test_gtc_taker_partially_filled( + admin: &signer, + market_signer: &signer, + maker: &signer, + taker: &signer + ) { + // Setup accounts + let market = new_market( + admin, + market_signer, + new_market_config(false, true) + ); + clearinghouse_test::initialize(admin); + let maker_addr = signer::address_of(maker); + let taker_addr = signer::address_of(taker); + + let event_store = event_utils::new_event_store(); + let maker_order_id = + place_maker_order_and_verify( + &mut market, + maker, + 1000, + 1000000, + true, + good_till_cancelled(), + &mut event_store, + false, + false, + new_test_order_metadata(), + &test_market_callbacks() + ); + + let taker_order_id = + place_taker_order_and_verify_fill( + &mut market, + taker, + 1000, + 2000000, + false, + good_till_cancelled(), + vector[1000000], + vector[1000], + maker_addr, + vector[maker_order_id], + vector[1000000], + vector[1000000], + &mut event_store, + false, + option::none(), + new_test_order_metadata(), + &test_market_callbacks() + ); + assert!(get_position_size(maker_addr) == 1000000); + assert!(get_position_size(taker_addr) == 1000000); + assert!(clearinghouse_test::order_exists(taker_order_id)); + assert!(!clearinghouse_test::order_exists(maker_order_id)); + market.destroy_market() + } + + #[test( + admin = @0x1, market_signer = @0x123, maker1 = @0x456, maker2 = @0x789 + )] + public fun test_post_only_success( + admin: &signer, + market_signer: &signer, + maker1: &signer, + maker2: &signer + ) { + // Setup accounts + let market = new_market( + admin, + market_signer, + new_market_config(false, true) + ); + clearinghouse_test::initialize(admin); + let maker1_addr = signer::address_of(maker1); + let maker2_addr = signer::address_of(maker2); + + let event_store = event_utils::new_event_store(); + + let maker_order_id = + place_maker_order_and_verify( + &mut market, + maker1, + 1000, + 1000000, + true, + good_till_cancelled(), + &mut event_store, + false, + false, + new_test_order_metadata(), + &test_market_callbacks() + ); + + // Place a post only order that should not match with the maker order + let maker2_order_id = + place_maker_order_and_verify( + &mut market, + maker2, + 1100, + 1000000, + false, // is_buy + post_only(), // order_type + &mut event_store, + false, + false, + new_test_order_metadata(), + &test_market_callbacks() + ); + + // Make sure no matches triggered by post only order + assert!(get_position_size(maker1_addr) == 0); + assert!(get_position_size(maker2_addr) == 0); + + // Ensure the post only order was posted to the order book + assert!( + market.get_remaining_size(signer::address_of(maker1), maker_order_id) + == 1000000 + ); + assert!( + market.get_remaining_size(signer::address_of(maker2), maker2_order_id) + == 1000000 + ); + + // Verify that the maker order is still active + assert!(clearinghouse_test::order_exists(maker_order_id)); + assert!(clearinghouse_test::order_exists(maker2_order_id)); + + market.destroy_market() + } + + #[test( + admin = @0x1, market_signer = @0x123, maker = @0x456, taker = @0x789 + )] + public fun test_post_only_failure( + admin: &signer, + market_signer: &signer, + maker: &signer, + taker: &signer + ) { + // Setup accounts + let market = new_market( + admin, + market_signer, + new_market_config(false, true) + ); + clearinghouse_test::initialize(admin); + let event_store = event_utils::new_event_store(); + + let maker_addr = signer::address_of(maker); + let taker_addr = signer::address_of(taker); + + let maker_order_id = + place_maker_order_and_verify( + &mut market, + maker, + 
1000, + 1000000, + true, // is_buy + good_till_cancelled(), // order_type + &mut event_store, + false, + false, + new_test_order_metadata(), + &test_market_callbacks() + ); + + // Taker order which is marked as post only but will immediately match - this should fail + let taker_order_id = + place_maker_order_and_verify( + &mut market, + taker, + 1000, + 1000000, + false, // is_buy + post_only(), // order_type + &mut event_store, + true, + true, + new_test_order_metadata(), + &test_market_callbacks() + ); + + // Make sure no matches triggered by post only order + assert!(get_position_size(maker_addr) == 0); + assert!(get_position_size(taker_addr) == 0); + + // Ensure the post only order was not posted in the order book + assert!( + market.get_remaining_size(signer::address_of(taker), taker_order_id) == 0 + ); + // Verify that the taker order is not active + assert!(!clearinghouse_test::order_exists(taker_order_id)); + // The maker order should still be active + assert!(clearinghouse_test::order_exists(maker_order_id)); + market.destroy_market() + } + + #[test( + admin = @0x1, market_signer = @0x123, maker = @0x456, taker = @0x789 + )] + public fun test_ioc_full_match( + admin: &signer, + market_signer: &signer, + maker: &signer, + taker: &signer + ) { + // Setup accounts + let market = new_market( + admin, + market_signer, + new_market_config(false, true) + ); + clearinghouse_test::initialize(admin); + let event_store = event_utils::new_event_store(); + let maker_addr = signer::address_of(maker); + let taker_addr = signer::address_of(taker); + + let maker_order_id = + place_maker_order_and_verify( + &mut market, + maker, + 1000, + 1000000, + true, // is_buy + good_till_cancelled(), // order_type + &mut event_store, + false, + false, + new_test_order_metadata(), + &test_market_callbacks() + ); + + // Taker order will be immediately match in the same transaction + let taker_order_id = + place_taker_order_and_verify_fill( + &mut market, + taker, + 1000, + 1000000, + false, // is_buy + immediate_or_cancel(), // order_type + vector[1000000], + vector[1000], + maker_addr, + vector[maker_order_id], + vector[1000000], + vector[1000000], + &mut event_store, + false, + option::none(), + new_test_order_metadata(), + &test_market_callbacks() + ); + + assert!(get_position_size(maker_addr) == 1000000); + assert!(get_position_size(taker_addr) == 1000000); + + // Both orders should be filled and cleaned up + assert!(!clearinghouse_test::order_exists(maker_order_id)); + assert!(!clearinghouse_test::order_exists(taker_order_id)); + + assert!( + market.get_remaining_size(signer::address_of(taker), taker_order_id) == 0 + ); + assert!( + market.get_remaining_size(signer::address_of(maker), maker_order_id) == 0 + ); + market.destroy_market() + } + + #[test( + admin = @0x1, market_signer = @0x123, maker = @0x456, taker = @0x789 + )] + public fun test_ioc_partial_match( + admin: &signer, + market_signer: &signer, + maker: &signer, + taker: &signer + ) { + // Setup accounts + let market = new_market( + admin, + market_signer, + new_market_config(false, true) + ); + clearinghouse_test::initialize(admin); + let event_store = event_utils::new_event_store(); + let maker_addr = signer::address_of(maker); + let taker_addr = signer::address_of(taker); + + let maker_order_id = + place_maker_order_and_verify( + &mut market, + maker, + 1000, + 1000000, + true, // is_buy + good_till_cancelled(), // order_type + &mut event_store, + false, + false, + new_test_order_metadata(), + &test_market_callbacks() + ); + + // Taker order 
is IOC, which will partially match and remaining will be cancelled + let taker_order_id = + place_taker_order_and_verify_fill( + &mut market, + taker, + 1000, + 2000000, + false, // is_buy + immediate_or_cancel(), // order_type + vector[1000000], + vector[1000], + maker_addr, + vector[maker_order_id], + vector[1000000], + vector[1000000], + &mut event_store, + true, + option::none(), + new_test_order_metadata(), + &test_market_callbacks() + ); + + assert!(get_position_size(maker_addr) == 1000000); + assert!(get_position_size(taker_addr) == 1000000); + + // Ensure both orders are cleaned up + assert!(!clearinghouse_test::order_exists(maker_order_id)); + assert!(!clearinghouse_test::order_exists(taker_order_id)); + + assert!( + market.get_remaining_size(signer::address_of(taker), taker_order_id) == 0 + ); + assert!( + market.get_remaining_size(signer::address_of(maker), maker_order_id) == 0 + ); + market.destroy_market() + } + + #[test( + admin = @0x1, market_signer = @0x123, maker = @0x456, taker = @0x789 + )] + public fun test_ioc_no_match( + admin: &signer, + market_signer: &signer, + maker: &signer, + taker: &signer + ) { + // Setup accounts + let market = new_market( + admin, + market_signer, + new_market_config(false, true) + ); + clearinghouse_test::initialize(admin); + let event_store = event_utils::new_event_store(); + let maker_addr = signer::address_of(maker); + let taker_addr = signer::address_of(taker); + + let maker_order_id = + place_maker_order_and_verify( + &mut market, + maker, + 1000, + 1000000, // 1 BTC + true, // is_buy + good_till_cancelled(), // order_type + &mut event_store, + false, + false, + new_test_order_metadata(), + &test_market_callbacks() + ); + + // Taker order is IOC, which will not be matched and should be cancelled + let taker_order_id = + place_maker_order_and_verify( + &mut market, + taker, + 1200, + 1000000, // 1 BTC + false, // is_buy + immediate_or_cancel(), // order_type + &mut event_store, + false, // Despite it being a "taker", this order will not cross + true, + new_test_order_metadata(), + &test_market_callbacks() + ); + + // Make sure no matches triggered by post only order + assert!(get_position_size(maker_addr) == 0); + assert!(get_position_size(taker_addr) == 0); + + // Ensure the taker order was not posted in the order book and was cleaned up + assert!(!clearinghouse_test::order_exists(taker_order_id)); + // The maker order should still be active + assert!(clearinghouse_test::order_exists(maker_order_id)); + assert!( + market.get_remaining_size(signer::address_of(maker), maker_order_id) + == 1000000 + ); + assert!( + market.get_remaining_size(signer::address_of(taker), taker_order_id) == 0 + ); + market.destroy_market() + } + + #[test( + admin = @0x1, market_signer = @0x123, maker = @0x456, taker = @0x789 + )] + public fun test_taker_order_partial_fill( + admin: &signer, + market_signer: &signer, + maker: &signer, + taker: &signer + ) { + // Setup accounts + let market = new_market( + admin, + market_signer, + new_market_config(false, true) + ); + clearinghouse_test::initialize(admin); + let event_store = event_utils::new_event_store(); + let maker_addr = signer::address_of(maker); + let taker_addr = signer::address_of(taker); + + // Place maker order + let maker_order_id = + place_maker_order_and_verify( + &mut market, + maker, + 1000, // price + 500000, // 0.5 BTC + true, // is_buy + good_till_cancelled(), + &mut event_store, + false, + false, + new_test_order_metadata(), + &test_market_callbacks() + ); + + // Taker order that will fully 
consume maker order but still have remaining size + let taker_order_id = + place_taker_order_and_verify_fill( + &mut market, + taker, + 1000, + 1000000, // 1 BTC + false, // is_buy + good_till_cancelled(), + vector[500000], // 0.5 BTC + vector[1000], + maker_addr, + vector[maker_order_id], + vector[500000], + vector[500000], + &mut event_store, + false, + option::none(), + new_test_order_metadata(), + &test_market_callbacks() + ); + + // Check positions after fill + assert!(get_position_size(maker_addr) == 500000); // Long 0.5 BTC + assert!(get_position_size(taker_addr) == 500000); // Short 0.5 BTC + + // Verify maker order fully filled + assert!(market.get_remaining_size(maker_addr, maker_order_id) == 0); + assert!(!clearinghouse_test::order_exists(maker_order_id)); + + // Taker order partially filled + assert!( + market.get_remaining_size(taker_addr, taker_order_id) == 500000 // 0.5 BTC remaining + ); + assert!(clearinghouse_test::order_exists(taker_order_id)); + + market.destroy_market() + } + + #[test( + admin = @0x1, market_signer = @0x123, maker = @0x456, taker = @0x789 + )] + public fun test_taker_order_multiple_fills( + admin: &signer, + market_signer: &signer, + maker: &signer, + taker: &signer + ) { + // Setup accounts + let market = new_market( + admin, + market_signer, + new_market_config(false, true) + ); + clearinghouse_test::initialize(admin); + let event_store = event_utils::new_event_store(); + let maker_addr = signer::address_of(maker); + let taker_addr = signer::address_of(taker); + + // Place several maker order with small sizes. + let i = 1; + let maker_order_ids = vector::empty(); + let expected_fill_sizes = vector::empty(); + let fill_prices = vector::empty(); + let maker_orig_sizes = vector::empty(); + while (i < 6) { + let maker_order_id = + place_maker_order_and_verify( + &mut market, + maker, + 1000 - i, + 10000 * i, + true, + good_till_cancelled(), + &mut event_store, + false, + false, + new_test_order_metadata(), + &test_market_callbacks() + ); + maker_order_ids.push_back(maker_order_id); + expected_fill_sizes.push_back(10000 * i); + maker_orig_sizes.push_back(10000 * i); + fill_prices.push_back(1000 - i); + i += 1; + }; + let total_fill_size = expected_fill_sizes.fold(0, |acc, x| acc + x); + + // Order not matched yet, so the balance should not change + assert!(get_position_size(maker_addr) == 0); + assert!(get_position_size(taker_addr) == 0); + let taker_order_id = + place_taker_order_and_verify_fill( + &mut market, + taker, + 990, + 1000000, + false, + good_till_cancelled(), + expected_fill_sizes, + fill_prices, + maker_addr, + maker_order_ids, + maker_orig_sizes, + maker_orig_sizes, + &mut event_store, + false, + option::none(), + new_test_order_metadata(), + &test_market_callbacks() + ); + assert!(get_position_size(maker_addr) == total_fill_size); + assert!(get_position_size(taker_addr) == total_fill_size); + // Ensure all maker orders are cleaned up + while (maker_order_ids.length() > 0) { + let maker_order_id = maker_order_ids.pop_back(); + assert!(!clearinghouse_test::order_exists(maker_order_id)); + }; + // Taker order should not be cleaned up since it is partially filled + assert!(clearinghouse_test::order_exists(taker_order_id)); + market.destroy_market() + } + + #[test( + admin = @0x1, market_signer = @0x123, maker = @0x456, taker = @0x789 + )] + public fun test_taker_partial_cancelled_maker_reinserted( + admin: &signer, + market_signer: &signer, + maker: &signer, + taker: &signer + ) { + // Setup accounts + let market = new_market( + admin, + 
market_signer, + new_market_config(false, true) + ); + clearinghouse_test::initialize(admin); + let maker_addr = signer::address_of(maker); + let taker_addr = signer::address_of(taker); + + let event_store = event_utils::new_event_store(); + let maker_order_id = + place_maker_order_and_verify( + &mut market, + maker, + 1000, + 2000000, + true, + good_till_cancelled(), + &mut event_store, + false, + false, + new_test_order_metadata(), + &test_market_callbacks() + ); + + // Order not filled yet, so size is 0 + assert!(get_position_size(maker_addr) == 0); + assert!(get_position_size(taker_addr) == 0); + + let taker_order_id = + place_taker_order_and_verify_fill( + &mut market, + taker, + 1000, + 1000000, + false, + good_till_cancelled(), + vector[500000], // Half of the taker order is filled and half is cancelled + vector[1000], + maker_addr, + vector[maker_order_id], + vector[2000000], + vector[2000000], + &mut event_store, + true, + option::none(), + new_test_order_metadata(), + &test_market_callbacks_with_taker_cancelled() + ); + // Make sure the maker order is reinserted + assert!(market.get_remaining_size(maker_addr, maker_order_id) == 1500000); + assert!(clearinghouse_test::order_exists(maker_order_id)); + assert!(!clearinghouse_test::order_exists(taker_order_id)); + market.destroy_market() + } + + #[test( + admin = @0x1, market_signer = @0x123, maker1 = @0x456, maker2 = @0x789 + )] + public fun test_self_matching_not_allowed( + admin: &signer, + market_signer: &signer, + maker1: &signer, + maker2: &signer + ) { + // Setup accounts + let market = new_market( + admin, + market_signer, + new_market_config(false, true) + ); + clearinghouse_test::initialize(admin); + let maker1_addr = signer::address_of(maker1); + let maker2_addr = signer::address_of(maker2); + let event_store = event_utils::new_event_store(); + let maker1_order_id = + place_maker_order_and_verify( + &mut market, + maker1, + 1001, + 2000000, + true, + good_till_cancelled(), + &mut event_store, + false, + false, + new_test_order_metadata(), + &test_market_callbacks() + ); + + let maker2_order_id = + place_maker_order_and_verify( + &mut market, + maker2, + 1000, + 2000000, + true, + good_till_cancelled(), + &mut event_store, + false, + false, + new_test_order_metadata(), + &test_market_callbacks() + ); + + // Order not filled yet, so size is 0 + assert!(get_position_size(maker1_addr) == 0); + + // This should result in a self match order which should be cancelled and maker2 order should be filled + let taker_order_id = + place_taker_order( + &mut market, + maker1, + 1000, + 1000000, + false, + good_till_cancelled(), + &mut event_store, + option::none(), + new_test_order_metadata(), + &test_market_callbacks() + ); + + verify_cancel_event( + &mut market, + maker1, + false, + maker1_order_id, + 1001, + 2000000, + 0, + 2000000, + true, + &mut event_store + ); + + verify_fills( + &mut market, + maker1, + taker_order_id, + 1000, + 1000000, + false, + vector[1000000], + vector[1000], + maker2_addr, + vector[maker2_order_id], + vector[2000000], + vector[2000000], + &mut event_store, + false + ); + + assert!(get_position_size(maker1_addr) == 1000000); + assert!(get_position_size(maker2_addr) == 1000000); + market.destroy_market() + } + + #[test( + admin = @0x1, market_signer = @0x123, maker1 = @0x456, maker2 = @0x789 + )] + public fun test_self_matching_allowed( + admin: &signer, + market_signer: &signer, + maker1: &signer, + maker2: &signer + ) { + // Setup accounts + let market = new_market( + admin, + market_signer, + 
new_market_config(true, true) + ); + clearinghouse_test::initialize(admin); + let maker1_addr = signer::address_of(maker1); + let event_store = event_utils::new_event_store(); + let maker1_order_id = + place_maker_order_and_verify( + &mut market, + maker1, + 1001, + 2000000, + true, + good_till_cancelled(), + &mut event_store, + false, + false, + new_test_order_metadata(), + &test_market_callbacks() + ); + + let _ = + place_maker_order_and_verify( + &mut market, + maker2, + 1000, + 2000000, + true, + good_till_cancelled(), + &mut event_store, + false, + false, + new_test_order_metadata(), + &test_market_callbacks() + ); + + // Order not filled yet, so size is 0 + assert!(get_position_size(maker1_addr) == 0); + + // This should result in a self match order which should be matched against self. + let taker_order_id = + place_taker_order( + &mut market, + maker1, + 1000, + 1000000, + false, + good_till_cancelled(), + &mut event_store, + option::none(), + new_test_order_metadata(), + &test_market_callbacks() + ); + + verify_fills( + &mut market, + maker1, + taker_order_id, + 1001, + 1000000, + false, + vector[1000000], + vector[1001], + maker1_addr, + vector[maker1_order_id], + vector[2000000], + vector[2000000], + &mut event_store, + false + ); + market.destroy_market() + } +} diff --git a/aptos-move/framework/aptos-experimental/sources/veiled_coin/helpers.move b/aptos-move/framework/aptos-experimental/sources/veiled_coin/helpers.move new file mode 100644 index 0000000000000..0e79dfdec8dd6 --- /dev/null +++ b/aptos-move/framework/aptos-experimental/sources/veiled_coin/helpers.move @@ -0,0 +1,46 @@ +module aptos_experimental::helpers { + use std::vector; + use std::error; + + use aptos_std::ristretto255_elgamal as elgamal; + use aptos_std::ristretto255; + + /// Tried cutting out more elements than are in the vector via `cut_vector`. + const EVECTOR_CUT_TOO_LARGE: u64 = 1; + + /// Given a vector `vec`, removes the last `cut_len` elements of `vec` and returns them in order. (This function + /// exists because we did not like the interface of `std::vector::trim`.) + public fun cut_vector(vec: &mut vector, cut_len: u64): vector { + let len = vector::length(vec); + let res = vector::empty(); + assert!(len >= cut_len, error::out_of_range(EVECTOR_CUT_TOO_LARGE)); + while (cut_len > 0) { + res.push_back(vector::pop_back(vec)); + cut_len -= 1; + }; + res.reverse(); + res + } + + /// Returns an encryption of zero, without any randomness (i.e., $r=0$), under any ElGamal PK. + public fun get_veiled_balance_zero_ciphertext(): elgamal::CompressedCiphertext { + elgamal::ciphertext_from_compressed_points( + ristretto255::point_identity_compressed(), ristretto255::point_identity_compressed()) + } + + /// Returns an encryption of `amount`, without any randomness (i.e., $r=0$), under any ElGamal PK. + /// WARNING: This is not a proper ciphertext: the value `amount` can be easily bruteforced. 
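+    /// (With $r = 0$, the second ciphertext component is the identity point and the first is simply `amount * G`,
+    /// so the 32-bit `amount` can be recovered by a brute-force search over the veiled amount range.)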
+ public fun public_amount_to_veiled_balance(amount: u32): elgamal::Ciphertext { + let scalar = ristretto255::new_scalar_from_u32(amount); + + elgamal::new_ciphertext_no_randomness(&scalar) + } + + #[test_only] + /// Returns a random ElGamal keypair + public fun generate_elgamal_keypair(): (ristretto255::Scalar, elgamal::CompressedPubkey) { + let sk = ristretto255::random_scalar(); + let pk = elgamal::pubkey_from_secret_key(&sk); + (sk, pk) + } +} diff --git a/aptos-move/framework/aptos-experimental/sources/veiled_coin/sigma_protos.move b/aptos-move/framework/aptos-experimental/sources/veiled_coin/sigma_protos.move new file mode 100644 index 0000000000000..92db1544d5efb --- /dev/null +++ b/aptos-move/framework/aptos-experimental/sources/veiled_coin/sigma_protos.move @@ -0,0 +1,949 @@ +/// Package for creating, verifying, serializing & deserializing the $\Sigma$-protocol proofs used in veiled coins. +/// +/// ## Preliminaries +/// +/// Recall that a $\Sigma$-protocol proof argues knowledge of a *secret* witness $w$ such that an arithmetic relation +/// $R(x; w) = 1$ is satisfied over group and field elements stored in $x$ and $w$. +/// +/// Here, $x$ is a public statement known to the verifier (i.e., known to the validators). Importantly, the +/// $\Sigma$-protocol's zero-knowledge property ensures the witness $w$ remains secret. +/// +/// ## WithdrawalSubproof: ElGamal-Pedersen equality +/// +/// This proof is used to provably convert an ElGamal ciphertext to a Pedersen commitment over which a ZK range proof +/// can be securely computed. Otherwise, knowledge of the ElGamal SK breaks the binding of the 2nd component of the +/// ElGamal ciphertext, making any ZK range proof over it useless. +/// Because the sender cannot, after receiving a fully veiled transaction, compute their balance randomness, their +/// updated balance ciphertext is computed in the relation, which is then linked to the Pedersen commitment of $b$. +/// +/// The secret witness $w$ in this relation, known only to the sender of the TXN, consists of: +/// - $b$, sender's new balance, after the withdrawal from their veiled balance +/// - $r$, randomness used to commit to $b$ +/// - $sk$, the sender's secret ElGamal encryption key +/// +/// (Note that the $\Sigma$-protocol's zero-knowledge property ensures the witness is not revealed.) +/// +/// The public statement $x$ in this relation consists of: +/// - $G$, basepoint of a given elliptic curve +/// - $H$, basepoint used for randomness in the Pedersen commitments +/// - $(C_1, C_2)$, ElGamal encryption of the sender's current balance +/// - $c$, Pedersen commitment to $b$ with randomness $r$ +/// - $v$, the amount the sender is withdrawing +/// - $Y$, the sender's ElGamal encryption public key +/// +/// The relation being proved is as follows: +/// +/// ``` +/// R( +/// x = [ (C_1, C_2), c, G, H, Y, v] +/// w = [ b, r, sk ] +/// ) = { +/// C_1 - v G = b G + sk C_2 +/// c = b G + r H +/// Y = sk G +/// } +/// ``` +/// +/// ## TransferSubproof: ElGamal-Pedersen equality and ElGamal-ElGamal equality +/// +/// This protocol argues two things. First, that the same amount is ElGamal-encrypted for both the sender and recipient. +/// This is needed to correctly withdraw & deposit the same amount during a transfer. Second, that this same amount is +/// committed via Pedersen. Third, that a Pedersen-committed balance is correctly ElGamal encrypted. ZK range proofs +/// are computed over these last two Pedersen commitments, to prevent overflowing attacks on the balance. 
+/// +/// The secret witness $w$ in this relation, known only to the sender of the TXN, consists of: +/// - $v$, amount being transferred +/// - $r$, randomness used to ElGamal-encrypt $v$ +/// - $b$, sender's new balance after the transfer occurs +/// - $r_b$, randomness used to Pedersen commit $b$ +/// - $sk$, the sender's secret ElGamal encryption key +/// +/// The public statement $x$ in this relation consists of: +/// - Public parameters +/// + $G$, basepoint of a given elliptic curve +/// + $H$, basepoint used for randomness in the Pedersen commitments +/// - PKs +/// + $Y$, sender's PK +/// + $Y'$, recipient's PK +/// - Amount encryption & commitment +/// + $(C, D)$, ElGamal encryption of $v$, under the sender's PK, using randomness $r$ +/// + $(C', D)$, ElGamal encryption of $v$, under the recipient's PK, using randomness $r$ +/// + $c$, Pedersen commitment to $v$ using randomness $r$ +/// - New balance encryption & commitment +/// + $(C_1, C_2)$, ElGamal encryption of the sender's *current* balance, under the sender's PK. This is used to +/// compute the sender's updated balance in the relation, as the sender cannot know their balance randomness. +/// + $c'$, Pedersen commitment to $b$ using randomness $r_b$ +/// +/// The relation being proved is: +/// ``` +/// R( +/// x = [ Y, Y', (C, C', D), c, (C_1, C_2), c', G, H ] +/// w = [ v, r, b, r_b, sk ] +/// ) = { +/// C = v G + r Y +/// C' = v G + r Y' +/// D = r G +/// C_1 - C = b G + sk (C_2 - D) +/// c = v G + r H +/// c' = b G + r_b H +/// Y = sk G +/// } +/// ``` +/// +/// A relation similar to this is also described on page 14 of the Zether paper [BAZB20] (just replace $G$ -> $g$, +/// $C'$ -> $\bar{C}$, $Y$ -> $y$, $Y'$ -> $\bar{y}$, $v$ -> $b^*$). Note that their relation does not include the +/// ElGamal-to-Pedersen conversion parts, as they can do ZK range proofs directly over ElGamal ciphertexts using their +/// $\Sigma$-bullets modification of Bulletproofs. +module aptos_experimental::sigma_protos { + use std::error; + use std::option::Option; + use std::vector; + + use aptos_std::ristretto255_elgamal as elgamal; + use aptos_std::ristretto255_pedersen as pedersen; + use aptos_std::ristretto255::{Self, RistrettoPoint, Scalar}; + + use aptos_experimental::helpers::cut_vector; + + #[test_only] + use aptos_experimental::helpers::generate_elgamal_keypair; + + // + // Errors + // + + /// The $\Sigma$-protocol proof for withdrawals did not verify. + const ESIGMA_PROTOCOL_VERIFY_FAILED: u64 = 1; + + // + // Constants + // + + /// The domain separation tag (DST) used in the Fiat-Shamir transform of our $\Sigma$-protocol. + const FIAT_SHAMIR_SIGMA_DST : vector = b"AptosVeiledCoin/WithdrawalSubproofFiatShamir"; + + // + // Structs + // + + /// A $\Sigma$-protocol used during an unveiled withdrawal (for proving the correct ElGamal encryption of a + /// Pedersen-committed balance). + struct WithdrawalSubproof has drop { + x1: RistrettoPoint, + x2: RistrettoPoint, + x3: RistrettoPoint, + alpha1: Scalar, + alpha2: Scalar, + alpha3: Scalar, + } + + /// A $\Sigma$-protocol proof used during a veiled transfer. This proof encompasses the $\Sigma$-protocol from + /// `WithdrawalSubproof`. 
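+    /// Serialized, the proof consists of 7 compressed Ristretto points and 5 scalars of 32 bytes each
+    /// (384 bytes in total), which is the length checked by `deserialize_transfer_subproof`.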
+ struct TransferSubproof has drop { + x1: RistrettoPoint, + x2: RistrettoPoint, + x3: RistrettoPoint, + x4: RistrettoPoint, + x5: RistrettoPoint, + x6: RistrettoPoint, + x7: RistrettoPoint, + alpha1: Scalar, + alpha2: Scalar, + alpha3: Scalar, + alpha4: Scalar, + alpha5: Scalar, + } + + // + // Public proof verification functions + // + + /// Verifies a $\Sigma$-protocol proof necessary to ensure correctness of a veiled transfer. + /// + /// Specifically, the proof argues that the same amount $v$ is Pedersen-committed in `comm_amount` and ElGamal- + /// encrypted in `withdraw_ct` (under `sender_pk`) and in `deposit_ct` (under `recipient_pk`), all three using the + /// same randomness $r$. + /// + /// In addition, it argues that the sender's new balance $b$ committed to by sender_new_balance_comm is the same + /// as the value encrypted by the ciphertext obtained by subtracting withdraw_ct from sender_curr_balance_ct + public fun verify_transfer_subproof( + sender_pk: &elgamal::CompressedPubkey, + recipient_pk: &elgamal::CompressedPubkey, + withdraw_ct: &elgamal::Ciphertext, + deposit_ct: &elgamal::Ciphertext, + comm_amount: &pedersen::Commitment, + sender_new_balance_comm: &pedersen::Commitment, + sender_curr_balance_ct: &elgamal::Ciphertext, + proof: &TransferSubproof) + { + let h = pedersen::randomness_base_for_bulletproof(); + let sender_pk_point = elgamal::pubkey_to_point(sender_pk); + let recipient_pk_point = elgamal::pubkey_to_point(recipient_pk); + let (big_c, big_d) = elgamal::ciphertext_as_points(withdraw_ct); + let (bar_big_c, _) = elgamal::ciphertext_as_points(deposit_ct); + let c = pedersen::commitment_as_point(comm_amount); + let (c1, c2) = elgamal::ciphertext_as_points(sender_curr_balance_ct); + let bar_c = pedersen::commitment_as_point(sender_new_balance_comm); + + // TODO: Can be optimized so we don't re-serialize the proof for Fiat-Shamir + let rho = fiat_shamir_transfer_subproof_challenge( + sender_pk, recipient_pk, + withdraw_ct, deposit_ct, comm_amount, + sender_curr_balance_ct, sender_new_balance_comm, + &proof.x1, &proof.x2, &proof.x3, &proof.x4, + &proof.x5, &proof.x6, &proof.x7); + + let g_alpha2 = ristretto255::basepoint_mul(&proof.alpha2); + // \rho * D + X1 =? \alpha_2 * g + let d_acc = ristretto255::point_mul(big_d, &rho); + ristretto255::point_add_assign(&mut d_acc, &proof.x1); + assert!(ristretto255::point_equals(&d_acc, &g_alpha2), error::invalid_argument(ESIGMA_PROTOCOL_VERIFY_FAILED)); + + let g_alpha1 = ristretto255::basepoint_mul(&proof.alpha1); + // \rho * C + X2 =? \alpha_1 * g + \alpha_2 * y + let big_c_acc = ristretto255::point_mul(big_c, &rho); + ristretto255::point_add_assign(&mut big_c_acc, &proof.x2); + let y_alpha2 = ristretto255::point_mul(&sender_pk_point, &proof.alpha2); + ristretto255::point_add_assign(&mut y_alpha2, &g_alpha1); + assert!(ristretto255::point_equals(&big_c_acc, &y_alpha2), error::invalid_argument(ESIGMA_PROTOCOL_VERIFY_FAILED)); + + // \rho * \bar{C} + X3 =? \alpha_1 * g + \alpha_2 * \bar{y} + let big_bar_c_acc = ristretto255::point_mul(bar_big_c, &rho); + ristretto255::point_add_assign(&mut big_bar_c_acc, &proof.x3); + let y_bar_alpha2 = ristretto255::point_mul(&recipient_pk_point, &proof.alpha2); + ristretto255::point_add_assign(&mut y_bar_alpha2, &g_alpha1); + assert!(ristretto255::point_equals(&big_bar_c_acc, &y_bar_alpha2), error::invalid_argument(ESIGMA_PROTOCOL_VERIFY_FAILED)); + + let g_alpha3 = ristretto255::basepoint_mul(&proof.alpha3); + // \rho * (C_1 - C) + X_4 =? 
\alpha_3 * g + \alpha_5 * (C_2 - D) + let big_c1_acc = ristretto255::point_sub(c1, big_c); + ristretto255::point_mul_assign(&mut big_c1_acc, &rho); + ristretto255::point_add_assign(&mut big_c1_acc, &proof.x4); + + let big_c2_acc = ristretto255::point_sub(c2, big_d); + ristretto255::point_mul_assign(&mut big_c2_acc, &proof.alpha5); + ristretto255::point_add_assign(&mut big_c2_acc, &g_alpha3); + assert!(ristretto255::point_equals(&big_c1_acc, &big_c2_acc), error::invalid_argument(ESIGMA_PROTOCOL_VERIFY_FAILED)); + + // \rho * c + X_5 =? \alpha_1 * g + \alpha_2 * h + let c_acc = ristretto255::point_mul(c, &rho); + ristretto255::point_add_assign(&mut c_acc, &proof.x5); + + let h_alpha2_acc = ristretto255::point_mul(&h, &proof.alpha2); + ristretto255::point_add_assign(&mut h_alpha2_acc, &g_alpha1); + assert!(ristretto255::point_equals(&c_acc, &h_alpha2_acc), error::invalid_argument(ESIGMA_PROTOCOL_VERIFY_FAILED)); + + // \rho * \bar{c} + X_6 =? \alpha_3 * g + \alpha_4 * h + let bar_c_acc = ristretto255::point_mul(bar_c, &rho); + ristretto255::point_add_assign(&mut bar_c_acc, &proof.x6); + + let h_alpha4_acc = ristretto255::point_mul(&h, &proof.alpha4); + ristretto255::point_add_assign(&mut h_alpha4_acc, &g_alpha3); + assert!(ristretto255::point_equals(&bar_c_acc, &h_alpha4_acc), error::invalid_argument(ESIGMA_PROTOCOL_VERIFY_FAILED)); + + // \rho * Y + X_7 =? \alpha_5 * G + let y_acc = ristretto255::point_mul(&sender_pk_point, &rho); + ristretto255::point_add_assign(&mut y_acc, &proof.x7); + + let g_alpha5 = ristretto255::basepoint_mul(&proof.alpha5); + assert!(ristretto255::point_equals(&y_acc, &g_alpha5), error::invalid_argument(ESIGMA_PROTOCOL_VERIFY_FAILED)); + } + + /// Verifies the $\Sigma$-protocol proof necessary to ensure correctness of a veiled-to-unveiled transfer. + /// + /// Specifically, the proof argues that the same amount $v$ is Pedersen-committed in `sender_new_balance_comm` and + /// ElGamal-encrypted in the ciphertext obtained by subtracting the ciphertext (vG, 0G) from sender_curr_balance_ct + public fun verify_withdrawal_subproof( + sender_pk: &elgamal::CompressedPubkey, + sender_curr_balance_ct: &elgamal::Ciphertext, + sender_new_balance_comm: &pedersen::Commitment, + amount: &Scalar, + proof: &WithdrawalSubproof) + { + let h = pedersen::randomness_base_for_bulletproof(); + let (big_c1, big_c2) = elgamal::ciphertext_as_points(sender_curr_balance_ct); + let c = pedersen::commitment_as_point(sender_new_balance_comm); + let sender_pk_point = elgamal::pubkey_to_point(sender_pk); + + let rho = fiat_shamir_withdrawal_subproof_challenge( + sender_pk, + sender_curr_balance_ct, + sender_new_balance_comm, + amount, + &proof.x1, + &proof.x2, + &proof.x3); + + let g_alpha1 = ristretto255::basepoint_mul(&proof.alpha1); + // \rho * (C_1 - v * g) + X_1 =? \alpha_1 * g + \alpha_3 * C_2 + let gv = ristretto255::basepoint_mul(amount); + let big_c1_acc = ristretto255::point_sub(big_c1, &gv); + ristretto255::point_mul_assign(&mut big_c1_acc, &rho); + ristretto255::point_add_assign(&mut big_c1_acc, &proof.x1); + + let big_c2_acc = ristretto255::point_mul(big_c2, &proof.alpha3); + ristretto255::point_add_assign(&mut big_c2_acc, &g_alpha1); + assert!(ristretto255::point_equals(&big_c1_acc, &big_c2_acc), error::invalid_argument(ESIGMA_PROTOCOL_VERIFY_FAILED)); + + // \rho * c + X_2 =? 
\alpha_1 * g + \alpha_2 * h + let c_acc = ristretto255::point_mul(c, &rho); + ristretto255::point_add_assign(&mut c_acc, &proof.x2); + + let h_alpha2_acc = ristretto255::point_mul(&h, &proof.alpha2); + ristretto255::point_add_assign(&mut h_alpha2_acc, &g_alpha1); + assert!(ristretto255::point_equals(&c_acc, &h_alpha2_acc), error::invalid_argument(ESIGMA_PROTOCOL_VERIFY_FAILED)); + + // \rho * Y + X_3 =? \alpha_3 * g + let y_acc = ristretto255::point_mul(&sender_pk_point, &rho); + ristretto255::point_add_assign(&mut y_acc, &proof.x3); + + let g_alpha3 = ristretto255::basepoint_mul(&proof.alpha3); + assert!(ristretto255::point_equals(&y_acc, &g_alpha3), error::invalid_argument(ESIGMA_PROTOCOL_VERIFY_FAILED)); + } + + // + // Public deserialization functions + // + + /// Deserializes and returns an `WithdrawalSubproof` given its byte representation. + public fun deserialize_withdrawal_subproof(proof_bytes: vector): Option { + if (proof_bytes.length::() != 192) { + return std::option::none() + }; + + let x1_bytes = cut_vector(&mut proof_bytes, 32); + let x1 = ristretto255::new_point_from_bytes(x1_bytes); + if (!x1.is_some::()) { + return std::option::none() + }; + let x1 = x1.extract::(); + + let x2_bytes = cut_vector(&mut proof_bytes, 32); + let x2 = ristretto255::new_point_from_bytes(x2_bytes); + if (!x2.is_some::()) { + return std::option::none() + }; + let x2 = x2.extract::(); + + let x3_bytes = cut_vector(&mut proof_bytes, 32); + let x3 = ristretto255::new_point_from_bytes(x3_bytes); + if (!x3.is_some::()) { + return std::option::none() + }; + let x3 = x3.extract::(); + + let alpha1_bytes = cut_vector(&mut proof_bytes, 32); + let alpha1 = ristretto255::new_scalar_from_bytes(alpha1_bytes); + if (!alpha1.is_some()) { + return std::option::none() + }; + let alpha1 = alpha1.extract(); + + let alpha2_bytes = cut_vector(&mut proof_bytes, 32); + let alpha2 = ristretto255::new_scalar_from_bytes(alpha2_bytes); + if (!alpha2.is_some()) { + return std::option::none() + }; + let alpha2 = alpha2.extract(); + + let alpha3_bytes = cut_vector(&mut proof_bytes, 32); + let alpha3 = ristretto255::new_scalar_from_bytes(alpha3_bytes); + if (!alpha3.is_some()) { + return std::option::none() + }; + let alpha3 = alpha3.extract(); + + std::option::some(WithdrawalSubproof { + x1, x2, x3, alpha1, alpha2, alpha3 + }) + } + + /// Deserializes and returns a `TransferSubproof` given its byte representation. 
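+    /// Returns `std::option::none()` if the input has the wrong length or if any point or scalar fails to
+    /// deserialize. Components are cut from the back of the byte vector via `cut_vector`, matching the
+    /// reverse field order used by the test-only `serialize_transfer_subproof`.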
+ public fun deserialize_transfer_subproof(proof_bytes: vector): Option { + if (proof_bytes.length::() != 384) { + return std::option::none() + }; + + let x1_bytes = cut_vector(&mut proof_bytes, 32); + let x1 = ristretto255::new_point_from_bytes(x1_bytes); + if (!x1.is_some::()) { + return std::option::none() + }; + let x1 = x1.extract::(); + + let x2_bytes = cut_vector(&mut proof_bytes, 32); + let x2 = ristretto255::new_point_from_bytes(x2_bytes); + if (!x2.is_some::()) { + return std::option::none() + }; + let x2 = x2.extract::(); + + let x3_bytes = cut_vector(&mut proof_bytes, 32); + let x3 = ristretto255::new_point_from_bytes(x3_bytes); + if (!x3.is_some::()) { + return std::option::none() + }; + let x3 = x3.extract::(); + + let x4_bytes = cut_vector(&mut proof_bytes, 32); + let x4 = ristretto255::new_point_from_bytes(x4_bytes); + if (!x4.is_some::()) { + return std::option::none() + }; + let x4 = x4.extract::(); + + let x5_bytes = cut_vector(&mut proof_bytes, 32); + let x5 = ristretto255::new_point_from_bytes(x5_bytes); + if (!x5.is_some::()) { + return std::option::none() + }; + let x5 = x5.extract::(); + + let x6_bytes = cut_vector(&mut proof_bytes, 32); + let x6 = ristretto255::new_point_from_bytes(x6_bytes); + if (!x6.is_some::()) { + return std::option::none() + }; + let x6 = x6.extract::(); + + let x7_bytes = cut_vector(&mut proof_bytes, 32); + let x7 = ristretto255::new_point_from_bytes(x7_bytes); + if (!x7.is_some::()) { + return std::option::none() + }; + let x7 = x7.extract::(); + + let alpha1_bytes = cut_vector(&mut proof_bytes, 32); + let alpha1 = ristretto255::new_scalar_from_bytes(alpha1_bytes); + if (!alpha1.is_some()) { + return std::option::none() + }; + let alpha1 = alpha1.extract(); + + let alpha2_bytes = cut_vector(&mut proof_bytes, 32); + let alpha2 = ristretto255::new_scalar_from_bytes(alpha2_bytes); + if (!alpha2.is_some()) { + return std::option::none() + }; + let alpha2 = alpha2.extract(); + + let alpha3_bytes = cut_vector(&mut proof_bytes, 32); + let alpha3 = ristretto255::new_scalar_from_bytes(alpha3_bytes); + if (!alpha3.is_some()) { + return std::option::none() + }; + let alpha3 = alpha3.extract(); + + let alpha4_bytes = cut_vector(&mut proof_bytes, 32); + let alpha4 = ristretto255::new_scalar_from_bytes(alpha4_bytes); + if (!alpha4.is_some()) { + return std::option::none() + }; + let alpha4 = alpha4.extract(); + + let alpha5_bytes = cut_vector(&mut proof_bytes, 32); + let alpha5 = ristretto255::new_scalar_from_bytes(alpha5_bytes); + if (!alpha5.is_some()) { + return std::option::none() + }; + let alpha5 = alpha5.extract(); + + std::option::some(TransferSubproof { + x1, x2, x3, x4, x5, x6, x7, alpha1, alpha2, alpha3, alpha4, alpha5 + }) + } + + // + // Private functions for Fiat-Shamir challenge derivation + // + + /// Computes a Fiat-Shamir challenge `rho = H(G, H, Y, C_1, C_2, c, x_1, x_2, x_3)` for the `WithdrawalSubproof` + /// $\Sigma$-protocol. 
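+    /// (The challenge also absorbs the public withdrawal amount $v$, serialized as a scalar.)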
+ fun fiat_shamir_withdrawal_subproof_challenge( + sender_pk: &elgamal::CompressedPubkey, + sender_curr_balance_ct: &elgamal::Ciphertext, + sender_new_balance_comm: &pedersen::Commitment, + amount: &Scalar, + x1: &RistrettoPoint, + x2: &RistrettoPoint, + x3: &RistrettoPoint): Scalar + { + let (c1, c2) = elgamal::ciphertext_as_points(sender_curr_balance_ct); + let c = pedersen::commitment_as_point(sender_new_balance_comm); + let y = elgamal::pubkey_to_compressed_point(sender_pk); + + let bytes = vector::empty(); + + bytes.append::(FIAT_SHAMIR_SIGMA_DST); + bytes.append::(ristretto255::point_to_bytes(&ristretto255::basepoint_compressed())); + bytes.append::(ristretto255::point_to_bytes( + &ristretto255::point_compress(&pedersen::randomness_base_for_bulletproof()))); + bytes.append::(ristretto255::point_to_bytes(&y)); + bytes.append::(ristretto255::point_to_bytes(&ristretto255::point_compress(c1))); + bytes.append::(ristretto255::point_to_bytes(&ristretto255::point_compress(c2))); + bytes.append::(ristretto255::point_to_bytes(&ristretto255::point_compress(c))); + bytes.append::(ristretto255::scalar_to_bytes(amount)); + bytes.append::(ristretto255::point_to_bytes(&ristretto255::point_compress(x1))); + bytes.append::(ristretto255::point_to_bytes(&ristretto255::point_compress(x2))); + bytes.append::(ristretto255::point_to_bytes(&ristretto255::point_compress(x3))); + + ristretto255::new_scalar_from_sha2_512(bytes) + } + + /// Computes a Fiat-Shamir challenge `rho = H(G, H, Y, Y', C, D, c, c_1, c_2, \bar{c}, {X_i}_{i=1}^7)` for the + /// `TransferSubproof` $\Sigma$-protocol. + fun fiat_shamir_transfer_subproof_challenge( + sender_pk: &elgamal::CompressedPubkey, + recipient_pk: &elgamal::CompressedPubkey, + withdraw_ct: &elgamal::Ciphertext, + deposit_ct: &elgamal::Ciphertext, + comm_amount: &pedersen::Commitment, + sender_curr_balance_ct: &elgamal::Ciphertext, + sender_new_balance_comm: &pedersen::Commitment, + x1: &RistrettoPoint, + x2: &RistrettoPoint, + x3: &RistrettoPoint, + x4: &RistrettoPoint, + x5: &RistrettoPoint, + x6: &RistrettoPoint, + x7: &RistrettoPoint): Scalar + { + let y = elgamal::pubkey_to_compressed_point(sender_pk); + let y_prime = elgamal::pubkey_to_compressed_point(recipient_pk); + let (big_c, big_d) = elgamal::ciphertext_as_points(withdraw_ct); + let (big_c_prime, _) = elgamal::ciphertext_as_points(deposit_ct); + let c = pedersen::commitment_as_point(comm_amount); + let (c1, c2) = elgamal::ciphertext_as_points(sender_curr_balance_ct); + let bar_c = pedersen::commitment_as_point(sender_new_balance_comm); + + let bytes = vector::empty(); + + bytes.append::(FIAT_SHAMIR_SIGMA_DST); + bytes.append::(ristretto255::point_to_bytes(&ristretto255::basepoint_compressed())); + bytes.append::(ristretto255::point_to_bytes( + &ristretto255::point_compress(&pedersen::randomness_base_for_bulletproof()))); + bytes.append::(ristretto255::point_to_bytes(&y)); + bytes.append::(ristretto255::point_to_bytes(&y_prime)); + bytes.append::(ristretto255::point_to_bytes(&ristretto255::point_compress(big_c))); + bytes.append::(ristretto255::point_to_bytes(&ristretto255::point_compress(big_c_prime))); + bytes.append::(ristretto255::point_to_bytes(&ristretto255::point_compress(big_d))); + bytes.append::(ristretto255::point_to_bytes(&ristretto255::point_compress(c))); + bytes.append::(ristretto255::point_to_bytes(&ristretto255::point_compress(c1))); + bytes.append::(ristretto255::point_to_bytes(&ristretto255::point_compress(c2))); + 
bytes.append::(ristretto255::point_to_bytes(&ristretto255::point_compress(bar_c))); + bytes.append::(ristretto255::point_to_bytes(&ristretto255::point_compress(x1))); + bytes.append::(ristretto255::point_to_bytes(&ristretto255::point_compress(x2))); + bytes.append::(ristretto255::point_to_bytes(&ristretto255::point_compress(x3))); + bytes.append::(ristretto255::point_to_bytes(&ristretto255::point_compress(x4))); + bytes.append::(ristretto255::point_to_bytes(&ristretto255::point_compress(x5))); + bytes.append::(ristretto255::point_to_bytes(&ristretto255::point_compress(x6))); + bytes.append::(ristretto255::point_to_bytes(&ristretto255::point_compress(x7))); + + ristretto255::new_scalar_from_sha2_512(bytes) + } + + // + // Test-only serialization & proving functions + // + + #[test_only] + /// Proves the $\Sigma$-protocol used for veiled-to-unveiled coin transfers. + /// See top-level comments for a detailed description of the $\Sigma$-protocol + public fun prove_withdrawal( + sender_sk: &Scalar, + sender_pk: &elgamal::CompressedPubkey, + sender_curr_balance_ct: &elgamal::Ciphertext, + sender_new_balance_comm: &pedersen::Commitment, + new_balance_val: &Scalar, + amount_val: &Scalar, + new_balance_comm_rand: &Scalar): WithdrawalSubproof + { + let x1 = ristretto255::random_scalar(); + let x2 = ristretto255::random_scalar(); + let x3 = ristretto255::random_scalar(); + let h = pedersen::randomness_base_for_bulletproof(); + let (_, c2) = elgamal::ciphertext_as_points(sender_curr_balance_ct); + + let g_x1 = ristretto255::basepoint_mul(&x1); + // X1 <- x1 * g + x3 * C2 + let big_x1 = ristretto255::point_mul(c2, &x3); + ristretto255::point_add_assign(&mut big_x1, &g_x1); + + // X2 <- x1 * g + x2 * h + let big_x2 = ristretto255::point_mul(&h, &x2); + ristretto255::point_add_assign(&mut big_x2, &g_x1); + + // X3 <- x3 * g + let big_x3 = ristretto255::basepoint_mul(&x3); + + let rho = fiat_shamir_withdrawal_subproof_challenge( + sender_pk, + sender_curr_balance_ct, + sender_new_balance_comm, + amount_val, + &big_x1, + &big_x2, + &big_x3); + + // X3 <- x3 * g + let big_x3 = ristretto255::basepoint_mul(&x3); + + // alpha1 <- x1 + rho * b + let alpha1 = ristretto255::scalar_mul(&rho, new_balance_val); + ristretto255::scalar_add_assign(&mut alpha1, &x1); + + // alpha2 <- x2 + rho * r' + let alpha2 = ristretto255::scalar_mul(&rho, new_balance_comm_rand); + ristretto255::scalar_add_assign(&mut alpha2, &x2); + + // alpha3 <- x3 + rho * sk + let alpha3 = ristretto255::scalar_mul(&rho, sender_sk); + ristretto255::scalar_add_assign(&mut alpha3, &x3); + + WithdrawalSubproof { + x1: big_x1, + x2: big_x2, + x3: big_x3, + alpha1, + alpha2, + alpha3, + } + } + + #[test_only] + /// Proves the $\Sigma$-protocol used for veiled coin transfers. 
+ /// See top-level comments for a detailed description of the $\Sigma$-protocol + public fun prove_transfer( + sender_pk: &elgamal::CompressedPubkey, + sender_sk: &Scalar, + recipient_pk: &elgamal::CompressedPubkey, + withdraw_ct: &elgamal::Ciphertext, + deposit_ct: &elgamal::Ciphertext, + comm_amount: &pedersen::Commitment, + sender_curr_balance_ct: &elgamal::Ciphertext, + sender_new_balance_comm: &pedersen::Commitment, + amount_rand: &Scalar, + amount_val: &Scalar, + new_balance_comm_rand: &Scalar, + new_balance_val: &Scalar): TransferSubproof + { + let x1 = ristretto255::random_scalar(); + let x2 = ristretto255::random_scalar(); + let x3 = ristretto255::random_scalar(); + let x4 = ristretto255::random_scalar(); + let x5 = ristretto255::random_scalar(); + let source_pk_point = elgamal::pubkey_to_point(sender_pk); + let recipient_pk_point = elgamal::pubkey_to_point(recipient_pk); + let h = pedersen::randomness_base_for_bulletproof(); + let (_, c2) = elgamal::ciphertext_as_points(sender_curr_balance_ct); + let (_, d) = elgamal::ciphertext_as_points(withdraw_ct); + + // X1 <- x2 * g + let big_x1 = ristretto255::basepoint_mul(&x2); + + let g_x1 = ristretto255::basepoint_mul(&x1); + // X2 <- x1 * g + x2 * y + let big_x2 = ristretto255::point_mul(&source_pk_point, &x2); + ristretto255::point_add_assign(&mut big_x2, &g_x1); + + // X3 <- x1 * g + x2 * \bar{y} + let big_x3 = ristretto255::point_mul(&recipient_pk_point, &x2); + ristretto255::point_add_assign(&mut big_x3, &g_x1); + + let g_x3 = ristretto255::basepoint_mul(&x3); + // X4 <- x3 * g + x5 * (C_2 - D) + let big_x4 = ristretto255::point_sub(c2, d); + ristretto255::point_mul_assign(&mut big_x4, &x5); + ristretto255::point_add_assign(&mut big_x4, &g_x3); + + // X5 <- x1 * g + x2 * h + let big_x5 = ristretto255::point_mul(&h, &x2); + ristretto255::point_add_assign(&mut big_x5, &g_x1); + + // X6 <- x3 * g + x4 * h + let big_x6 = ristretto255::point_mul(&h, &x4); + ristretto255::point_add_assign(&mut big_x6, &g_x3); + + // X7 <- x5 * g + let big_x7 = ristretto255::basepoint_mul(&x5); + + let rho = fiat_shamir_transfer_subproof_challenge( + sender_pk, recipient_pk, + withdraw_ct, deposit_ct, comm_amount, + sender_curr_balance_ct, sender_new_balance_comm, + &big_x1, &big_x2, &big_x3, &big_x4, + &big_x5, &big_x6, &big_x7); + + // alpha_1 <- x1 + rho * v + let alpha1 = ristretto255::scalar_mul(&rho, amount_val); + ristretto255::scalar_add_assign(&mut alpha1, &x1); + + // alpha_2 <- x2 + rho * r + let alpha2 = ristretto255::scalar_mul(&rho, amount_rand); + ristretto255::scalar_add_assign(&mut alpha2, &x2); + + // alpha_3 <- x3 + rho * b + let alpha3 = ristretto255::scalar_mul(&rho, new_balance_val); + ristretto255::scalar_add_assign(&mut alpha3, &x3); + + // alpha_4 <- x4 + rho * r' + let alpha4 = ristretto255::scalar_mul(&rho, new_balance_comm_rand); + ristretto255::scalar_add_assign(&mut alpha4, &x4); + + // alpha5 <- x5 + rho * sk + let alpha5 = ristretto255::scalar_mul(&rho, sender_sk); + ristretto255::scalar_add_assign(&mut alpha5, &x5); + + TransferSubproof { + x1: big_x1, + x2: big_x2, + x3: big_x3, + x4: big_x4, + x5: big_x5, + x6: big_x6, + x7: big_x7, + alpha1, + alpha2, + alpha3, + alpha4, + alpha5, + } + } + + #[test_only] + /// Given a $\Sigma$-protocol proof for veiled-to-unveiled transfers, serializes it into byte form. 
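+    /// The output is 192 bytes in total (3 compressed points and 3 scalars, 32 bytes each), laid out in
+    /// reverse field order so that `deserialize_withdrawal_subproof` can cut the fields back off the end
+    /// of the vector.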
+ public fun serialize_withdrawal_subproof(proof: &WithdrawalSubproof): vector { + // Reverse-iterates through the fields of the `WithdrawalSubproof` struct, serializes each field, and appends + // it into a vector of bytes which is returned at the end. + let x1_bytes = ristretto255::point_to_bytes(&ristretto255::point_compress(&proof.x1)); + let x2_bytes = ristretto255::point_to_bytes(&ristretto255::point_compress(&proof.x2)); + let x3_bytes = ristretto255::point_to_bytes(&ristretto255::point_compress(&proof.x3)); + let alpha1_bytes = ristretto255::scalar_to_bytes(&proof.alpha1); + let alpha2_bytes = ristretto255::scalar_to_bytes(&proof.alpha2); + let alpha3_bytes = ristretto255::scalar_to_bytes(&proof.alpha3); + + let bytes = vector::empty(); + bytes.append::(alpha3_bytes); + bytes.append::(alpha2_bytes); + bytes.append::(alpha1_bytes); + bytes.append::(x3_bytes); + bytes.append::(x2_bytes); + bytes.append::(x1_bytes); + + bytes + } + + #[test_only] + /// Given a $\Sigma$-protocol proof, serializes it into byte form. + public fun serialize_transfer_subproof(proof: &TransferSubproof): vector { + // Reverse-iterates through the fields of the `TransferSubproof` struct, serializes each field, and appends + // it into a vector of bytes which is returned at the end. + let x1_bytes = ristretto255::point_to_bytes(&ristretto255::point_compress(&proof.x1)); + let x2_bytes = ristretto255::point_to_bytes(&ristretto255::point_compress(&proof.x2)); + let x3_bytes = ristretto255::point_to_bytes(&ristretto255::point_compress(&proof.x3)); + let x4_bytes = ristretto255::point_to_bytes(&ristretto255::point_compress(&proof.x4)); + let x5_bytes = ristretto255::point_to_bytes(&ristretto255::point_compress(&proof.x5)); + let x6_bytes = ristretto255::point_to_bytes(&ristretto255::point_compress(&proof.x6)); + let x7_bytes = ristretto255::point_to_bytes(&ristretto255::point_compress(&proof.x7)); + let alpha1_bytes = ristretto255::scalar_to_bytes(&proof.alpha1); + let alpha2_bytes = ristretto255::scalar_to_bytes(&proof.alpha2); + let alpha3_bytes = ristretto255::scalar_to_bytes(&proof.alpha3); + let alpha4_bytes = ristretto255::scalar_to_bytes(&proof.alpha4); + let alpha5_bytes = ristretto255::scalar_to_bytes(&proof.alpha5); + + let bytes = vector::empty(); + bytes.append::(alpha5_bytes); + bytes.append::(alpha4_bytes); + bytes.append::(alpha3_bytes); + bytes.append::(alpha2_bytes); + bytes.append::(alpha1_bytes); + bytes.append::(x7_bytes); + bytes.append::(x6_bytes); + bytes.append::(x5_bytes); + bytes.append::(x4_bytes); + bytes.append::(x3_bytes); + bytes.append::(x2_bytes); + bytes.append::(x1_bytes); + + bytes + } + + // + // Sigma proof verification tests + // + + #[test_only] + fun verify_transfer_subproof_test(maul_proof: bool) + { + // Pick a keypair for the sender, and one for the recipient + let (sender_sk, sender_pk) = generate_elgamal_keypair(); + let (_, recipient_pk) = generate_elgamal_keypair(); + + // Set the transferred amount to 50 + let amount_val = ristretto255::new_scalar_from_u32(50); + let amount_rand = ristretto255::random_scalar(); + + // Encrypt the amount under the sender's PK + let withdraw_ct = elgamal::new_ciphertext_with_basepoint(&amount_val, &amount_rand, &sender_pk); + // Encrypt the amount under the recipient's PK + let deposit_ct = elgamal::new_ciphertext_with_basepoint(&amount_val, &amount_rand, &recipient_pk); + // Commit to the amount + let comm_amount = pedersen::new_commitment_for_bulletproof(&amount_val, &amount_rand); + + // Set sender's new balance after the 
transaction to 100 + let curr_balance_val = ristretto255::new_scalar_from_u32(150); + let new_balance_val = ristretto255::new_scalar_from_u32(100); + let new_balance_rand = ristretto255::random_scalar(); + let curr_balance_ct = elgamal::new_ciphertext_with_basepoint(&curr_balance_val, &new_balance_rand, &sender_pk); + + let new_balance_comm = pedersen::new_commitment_for_bulletproof(&new_balance_val, &new_balance_rand); + + let sigma_proof = prove_transfer( + &sender_pk, + &sender_sk, + &recipient_pk, + &withdraw_ct, // withdrawn amount, encrypted under sender PK + &deposit_ct, // deposited amount, encrypted under recipient PK (same plaintext as `withdraw_ct`) + &comm_amount, // commitment to transfer amount to prevent range proof forgery + &curr_balance_ct, // sender's balance before the transaction goes through, encrypted under sender PK + &new_balance_comm, // commitment to sender's balance to prevent range proof forgery + &amount_rand, // encryption randomness for `withdraw_ct` and `deposit_ct` + &amount_val, // transferred amount + &new_balance_rand, // encryption randomness for updated balance ciphertext + &new_balance_val, // sender's balance after the transfer + ); + + if (maul_proof) { + // This should fail the proof verification below + let random_point = ristretto255::random_point(); + sigma_proof.x1 = random_point; + }; + + verify_transfer_subproof( + &sender_pk, + &recipient_pk, + &withdraw_ct, + &deposit_ct, + &comm_amount, + &new_balance_comm, + &curr_balance_ct, + &sigma_proof + ); + } + + #[test] + fun verify_transfer_subproof_succeeds_test() { + verify_transfer_subproof_test(false); + } + + #[test] + #[expected_failure(abort_code = 0x10001, location = Self)] + fun verify_transfer_subproof_fails_test() + { + verify_transfer_subproof_test(true); + } + + #[test_only] + fun verify_withdrawal_subproof_test(maul_proof: bool) + { + // Pick a keypair for the sender + let (sender_sk, sender_pk) = generate_elgamal_keypair(); + + // Set the transferred amount to 50 + let curr_balance = ristretto255::new_scalar_from_u32(100); + let new_balance = ristretto255::new_scalar_from_u32(75); + let amount_withdrawn = ristretto255::new_scalar_from_u32(25); + let rand = ristretto255::random_scalar(); + + // Encrypt the amount under the sender's PK + let curr_balance_ct = elgamal::new_ciphertext_with_basepoint(&curr_balance, &rand, &sender_pk); + // Commit to the amount + let new_balance_comm = pedersen::new_commitment_for_bulletproof(&new_balance, &rand); + + let sigma_proof = prove_withdrawal( + &sender_sk, + &sender_pk, + &curr_balance_ct, + &new_balance_comm, + &new_balance, + &amount_withdrawn, + &rand, + ); + + if (maul_proof) { + // This should fail the proof verification below + let random_point = ristretto255::random_point(); + sigma_proof.x1 = random_point; + }; + + verify_withdrawal_subproof( + &sender_pk, + &curr_balance_ct, + &new_balance_comm, + &amount_withdrawn, + &sigma_proof + ); + } + + #[test] + fun verify_withdrawal_subproof_succeeds_test() { + verify_withdrawal_subproof_test(false); + } + + #[test] + #[expected_failure(abort_code = 0x10001, location = Self)] + fun verify_withdrawal_subproof_fails_test() { + verify_withdrawal_subproof_test(true); + } + + // + // Sigma proof deserialization tests + // + + #[test] + fun serialize_transfer_subproof_test() + { + let (sender_sk, sender_pk) = generate_elgamal_keypair(); + let amount_val = ristretto255::new_scalar_from_u32(50); + let (_, recipient_pk) = generate_elgamal_keypair(); + let amount_rand = ristretto255::random_scalar(); 
+ let withdraw_ct = elgamal::new_ciphertext_with_basepoint(&amount_val, &amount_rand, &sender_pk); + let deposit_ct = elgamal::new_ciphertext_with_basepoint(&amount_val, &amount_rand, &recipient_pk); + let comm_amount = pedersen::new_commitment_for_bulletproof(&amount_val, &amount_rand); + let curr_balance_val = ristretto255::new_scalar_from_u32(150); + let new_balance_val = ristretto255::new_scalar_from_u32(100); + let new_balance_rand = ristretto255::random_scalar(); + let curr_balance_ct = elgamal::new_ciphertext_with_basepoint(&curr_balance_val, &new_balance_rand, &sender_pk); + let new_balance_comm = pedersen::new_commitment_for_bulletproof(&new_balance_val, &new_balance_rand); + + let sigma_proof = prove_transfer( + &sender_pk, + &sender_sk, + &recipient_pk, + &withdraw_ct, + &deposit_ct, + &comm_amount, + &curr_balance_ct, + &new_balance_comm, + &amount_rand, + &amount_val, + &new_balance_rand, + &new_balance_val); + + let sigma_proof_bytes = serialize_transfer_subproof(&sigma_proof); + + let deserialized_proof = deserialize_transfer_subproof(sigma_proof_bytes).extract::(); + + assert!(ristretto255::point_equals(&sigma_proof.x1, &deserialized_proof.x1), 1); + assert!(ristretto255::point_equals(&sigma_proof.x2, &deserialized_proof.x2), 1); + assert!(ristretto255::point_equals(&sigma_proof.x3, &deserialized_proof.x3), 1); + assert!(ristretto255::point_equals(&sigma_proof.x4, &deserialized_proof.x4), 1); + assert!(ristretto255::point_equals(&sigma_proof.x5, &deserialized_proof.x5), 1); + assert!(ristretto255::point_equals(&sigma_proof.x6, &deserialized_proof.x6), 1); + assert!(ristretto255::scalar_equals(&sigma_proof.alpha1, &deserialized_proof.alpha1), 1); + assert!(ristretto255::scalar_equals(&sigma_proof.alpha2, &deserialized_proof.alpha2), 1); + assert!(ristretto255::scalar_equals(&sigma_proof.alpha3, &deserialized_proof.alpha3), 1); + assert!(ristretto255::scalar_equals(&sigma_proof.alpha4, &deserialized_proof.alpha4), 1); + assert!(ristretto255::scalar_equals(&sigma_proof.alpha5, &deserialized_proof.alpha5), 1); + } +} diff --git a/aptos-move/framework/aptos-experimental/sources/veiled_coin/veiled_coin.move b/aptos-move/framework/aptos-experimental/sources/veiled_coin/veiled_coin.move new file mode 100644 index 0000000000000..5b89176afde2b --- /dev/null +++ b/aptos-move/framework/aptos-experimental/sources/veiled_coin/veiled_coin.move @@ -0,0 +1,778 @@ +/// **WARNING:** This is an **experimental, proof-of-concept** module! It is *NOT* production-ready and it will likely +/// lead to loss of funds if used (or misused). +/// +/// This module provides a veiled coin type, denoted `VeiledCoin` that hides the value/denomination of a coin. +/// Importantly, although veiled transactions hide the amount of coins sent they still leak the sender and recipient. +/// +/// ## How to use veiled coins +/// +/// This module allows users to "register" a veiled account for any pre-existing `aptos_framework::Coin` type `T` via +/// the `register` entry function. For this, an encryption public key will need to be given as input, under which +/// the registered user's veiled balance will be encrypted. +/// +/// Once Alice registers a veiled account for `T`, she can call `veil` with any public amount `a` of `T` coins +/// and add them to her veiled balance. Note that these coins will not be properly veiled yet, since they were withdrawn +/// from a public balance, which leaks their value. +/// +/// (Alternatively, another user can initialize Alice's veiled balance by calling `veil_to`.) 
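+/// For example, Alice's side of this setup might look as follows (a sketch only: amounts are `u32`, the
+/// hypothetical `alice_pk_bytes` is Alice's serialized ElGamal encryption public key, and any generic type
+/// arguments are omitted):
+///
+/// ```
+/// veiled_coin::register(&alice, alice_pk_bytes); // one-time setup of Alice's veiled account
+/// veiled_coin::veil(&alice, 100);                // move 100 (truncated u32 units) into her veiled balance
+/// ```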
+///
+/// Suppose Bob also registers and veils `b` of his own coins of type `T`.
+///
+/// Now Alice can use `fully_veiled_transfer` to send to Bob a secret amount `v` of coins from her veiled balance.
+/// This will, for the first time, properly hide both Alice's and Bob's veiled balance.
+/// The only information that an attacker (e.g., an Aptos validator) learns is that Alice transferred an unknown amount
+/// `v` to Bob (including $v=0$), and as a result Alice's veiled balance lies in the range $[a-v, a]$ and Bob's veiled
+/// balance lies in the range $[b, b+v]$.
+///
+/// As more veiled transfers occur between more veiled accounts, the uncertainty about the balance of each account becomes
+/// larger and larger.
+///
+/// Lastly, users can easily withdraw veiled coins back into their public balance via `unveil`. Or, they can withdraw
+/// publicly into someone else's public balance via `unveil_to`.
+///
+/// ## Terminology
+///
+/// 1. *Veiled coin*: a coin whose value is secret; i.e., it is encrypted under the owner's public key.
+///
+/// 2. *Veiled amount*: any amount that is secret because it was encrypted under some public key.
+/// 3. *Committed amount*: any amount that is secret because it was committed to (rather than encrypted).
+///
+/// 4. *Veiled transaction*: a transaction that hides the amount transferred; i.e., a transaction whose amount is veiled.
+///
+/// 5. *Veiled balance*: unlike a normal balance, a veiled balance is secret; i.e., it is encrypted under the account's
+/// public key.
+///
+/// 6. *ZKRP*: zero-knowledge range proofs; one of the key cryptographic ingredients in veiled coins, which ensures users
+/// can withdraw secretly from their veiled balance without over-withdrawing.
+///
+/// ## Limitations
+///
+/// **WARNING:** This module is **experimental**! It is *NOT* production-ready. Specifically:
+///
+/// 1. Deploying this module will likely lead to lost funds.
+/// 2. This module has not been cryptographically audited.
+/// 3. The current implementation is vulnerable to _front-running attacks_ as described in the Zether paper [BAZB20].
+/// 4. There is no integration with wallet software which, for veiled accounts, must maintain an additional ElGamal
+/// encryption keypair.
+/// 5. There is no support for rotating the ElGamal encryption public key of a veiled account.
+///
+/// ## Veiled coin amounts as truncated `u32`'s
+///
+/// Veiled coin amounts must be specified as `u32`'s rather than `u64`'s, as would be typical for normal coins in the
+/// Aptos framework. This is because coin amounts must be encrypted with an *efficient*, additively-homomorphic encryption
+/// scheme. Currently, our best candidate is ElGamal encryption in the exponent, which can only decrypt values around
+/// 32 bits or slightly larger.
+///
+/// Specifically, veiled coin amounts are restricted to 32 bits and can be cast to a normal 64-bit coin value by
+/// setting the leftmost and rightmost 16 bits to zero and the "middle" 32 bits to be the veiled coin bits.
+///
+/// This gives veiled amounts ~10 bits for specifying ~3 decimals and ~22 bits for specifying whole amounts, which
+/// limits veiled balances and veiled transfers to around 4 million coins. (See `coin.move` for how a normal 64-bit coin
+/// value gets interpreted as a decimal number.)
+///
+/// In order to convert a `u32` veiled coin amount to a normal `u64` coin amount, we have to shift it left by 16 bits.
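+/// As a sketch (the module's own helpers for this are `cast_u32_to_u64_amount` and `clamp_u64_to_u32_amount`,
+/// see below; the exact bit layout is shown next):
+///
+/// ```
+/// // u32 veiled amount -> u64 public amount: cast, then shift left by 16 bits
+/// let public_amount = (veiled_amount as u64) << 16;
+/// // u64 public amount -> u32 veiled amount: drop the 16 least- and 16 most-significant bits (precision is lost)
+/// let veiled_amount = (((public_amount >> 16) & 0xFFFFFFFF) as u32);
+/// ```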
+/// +/// ``` +/// u64 normal coin amount format: +/// [ left || middle || right ] +/// [ 63 - 32 || 31 - 16 || 15 - 0] +/// +/// u32 veiled coin amount format; we take the middle 32 bits from the u64 format above and store them in a u32: +/// [ middle ] +/// [ 31 - 0 ] +/// ``` +/// +/// Recall that: A coin has a *decimal precision* $d$ (e.g., for `AptosCoin`, $d = 8$; see `initialize` in +/// `aptos_coin.move`). This precision $d$ is used when displaying a `u64` amount, by dividing the amount by $10^d$. +/// For example, if the precision $d = 2$, then a `u64` amount of 505 coins displays as 5.05 coins. +/// +/// For veiled coins, we can easily display a `u32` `Coin` amount $v$ by: +/// 1. Casting $v$ as a u64 and shifting this left by 16 bits, obtaining a 64-bit $v'$ +/// 2. Displaying $v'$ normally, by dividing it by $d$, which is the precision in `CoinInfo`. +/// +/// ## Implementation details +/// +/// This module leverages a so-called "resource account," which helps us mint a `VeiledCoin` from a +/// normal `coin::Coin` by transferring this latter coin into a `coin::CoinStore` stored in the +/// resource account. +/// +/// Later on, when someone wants to convert their `VeiledCoin` into a normal `coin::Coin`, +/// the resource account can be used to transfer out the normal from its coin store. Transferring out a coin like this +/// requires a `signer` for the resource account, which the `veiled_coin` module can obtain via a `SignerCapability`. +/// +/// ## References +/// +/// [BAZB20] Zether: Towards Privacy in a Smart Contract World; by Bunz, Benedikt and Agrawal, Shashank and Zamani, +/// Mahdi and Boneh, Dan; in Financial Cryptography and Data Security; 2020 +module aptos_experimental::veiled_coin { + use std::error; + use std::option::Option; + use std::signer; + use std::vector; + + use aptos_std::ristretto255; + use aptos_std::ristretto255_bulletproofs as bulletproofs; + use aptos_std::ristretto255_bulletproofs::RangeProof; + use aptos_std::ristretto255_elgamal as elgamal; + use aptos_std::ristretto255_pedersen as pedersen; + #[test_only] + use aptos_std::ristretto255::Scalar; + + use aptos_framework::account; + use aptos_framework::coin::{Self, Coin}; + use aptos_framework::event; + + use aptos_experimental::helpers; + use aptos_experimental::sigma_protos; + + // + // Errors + // + + /// The range proof system does not support proofs for any number \in [0, 2^{32}) + const ERANGE_PROOF_SYSTEM_HAS_INSUFFICIENT_RANGE: u64 = 1; + + /// A range proof failed to verify. + const ERANGE_PROOF_VERIFICATION_FAILED: u64 = 2; + + /// Account already has `VeiledCoinStore` registered. + const EVEILED_COIN_STORE_ALREADY_PUBLISHED: u64 = 3; + + /// Account hasn't registered `VeiledCoinStore`. + const EVEILED_COIN_STORE_NOT_PUBLISHED: u64 = 4; + + /// Not enough coins to complete transaction. + const EINSUFFICIENT_BALANCE: u64 = 5; + + /// Failed deserializing bytes into either ElGamal ciphertext or $\Sigma$-protocol proof. + const EDESERIALIZATION_FAILED: u64 = 6; + + /// Byte vector given for deserialization was the wrong length. + const EBYTES_WRONG_LENGTH: u64 = 7; + + /// The `NUM_LEAST_SIGNIFICANT_BITS_REMOVED` and `NUM_MOST_SIGNIFICANT_BITS_REMOVED` constants need to sum to 32 (bits). + const EU64_COIN_AMOUNT_CLAMPING_IS_INCORRECT: u64 = 8; + + /// Non-specific internal error (see source code) + const EINTERNAL_ERROR: u64 = 9; + + // + // Constants + // + + /// The maximum number of bits used to represent a coin's value. 
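+    /// Must not exceed the range supported by the Bulletproofs ZK range proof module; `init_module` asserts
+    /// `bulletproofs::get_max_range_bits() >= MAX_BITS_IN_VEILED_COIN_VALUE`.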
+ const MAX_BITS_IN_VEILED_COIN_VALUE: u64 = 32; + + /// When converting a `u64` normal (public) amount to a `u32` veiled amount, we keep the middle 32 bits and + /// remove the `NUM_LEAST_SIGNIFICANT_BITS_REMOVED` least significant bits and the `NUM_MOST_SIGNIFICANT_BITS_REMOVED` + /// most significant bits (see comments in the beginning of this file). + /// + /// When converting a `u32` veiled amount to a `u64` normal (public) amount, we simply cast it to `u64` and shift it + /// left by `NUM_LEAST_SIGNIFICANT_BITS_REMOVED`. + const NUM_LEAST_SIGNIFICANT_BITS_REMOVED: u8 = 16; + + /// See `NUM_LEAST_SIGNIFICANT_BITS_REMOVED` comments. + const NUM_MOST_SIGNIFICANT_BITS_REMOVED: u8 = 16; + + /// The domain separation tag (DST) used for the Bulletproofs prover. + const VEILED_COIN_BULLETPROOFS_DST: vector = b"AptosVeiledCoin/BulletproofRangeProof"; + + // + // Structs + // + + /// A holder of a specific coin type and its associated event handles. + /// These are kept in a single resource to ensure locality of data. + struct VeiledCoinStore has key { + /// A ElGamal ciphertext of a value $v \in [0, 2^{32})$, an invariant that is enforced throughout the code. + veiled_balance: elgamal::CompressedCiphertext, + pk: elgamal::CompressedPubkey, + } + + #[event] + /// Event emitted when some amount of veiled coins were deposited into an account. + struct Deposit has drop, store { + // We cannot leak any information about how much has been deposited. + user: address + } + + #[event] + /// Event emitted when some amount of veiled coins were withdrawn from an account. + struct Withdraw has drop, store { + // We cannot leak any information about how much has been withdrawn. + user: address + } + + /// Holds an `account::SignerCapability` for the resource account created when initializing this module. This + /// resource account houses a `coin::CoinStore` for every type of coin `T` that is veiled. + struct VeiledCoinMinter has store, key { + signer_cap: account::SignerCapability, + } + + /// Main structure representing a coin in an account's custody. + struct VeiledCoin { + /// ElGamal ciphertext which encrypts the number of coins $v \in [0, 2^{32})$. This $[0, 2^{32})$ range invariant + /// is enforced throughout the code via Bulletproof-based ZK range proofs. + veiled_amount: elgamal::Ciphertext, + } + + // + // Structs for cryptographic proofs + // + + /// A cryptographic proof that ensures correctness of a veiled-to-veiled coin transfer. + struct TransferProof has drop { + sigma_proof: sigma_protos::TransferSubproof, + zkrp_new_balance: RangeProof, + zkrp_amount: RangeProof, + } + + /// A cryptographic proof that ensures correctness of a veiled-to-*unveiled* coin transfer. + struct WithdrawalProof has drop { + sigma_proof: sigma_protos::WithdrawalSubproof, + zkrp_new_balance: RangeProof, + } + + // + // Module initialization, done only once when this module is first published on the blockchain + // + + /// Initializes a so-called "resource" account which will maintain a `coin::CoinStore` resource for all `Coin`'s + /// that have been converted into a `VeiledCoin`. + fun init_module(deployer: &signer) { + assert!( + bulletproofs::get_max_range_bits() >= MAX_BITS_IN_VEILED_COIN_VALUE, + error::internal(ERANGE_PROOF_SYSTEM_HAS_INSUFFICIENT_RANGE) + ); + + assert!( + NUM_LEAST_SIGNIFICANT_BITS_REMOVED + NUM_MOST_SIGNIFICANT_BITS_REMOVED == 32, + error::internal(EU64_COIN_AMOUNT_CLAMPING_IS_INCORRECT) + ); + + // Create the resource account. 
This will allow this module to later obtain a `signer` for this account and + // transfer `Coin`'s into its `CoinStore` before minting a `VeiledCoin`. + let (_resource, signer_cap) = account::create_resource_account(deployer, vector::empty()); + + move_to(deployer, + VeiledCoinMinter { + signer_cap + } + ) + } + + // + // Entry functions + // + + /// Initializes a veiled account for the specified `user` such that their balance is encrypted under public key `pk`. + /// Importantly, the user's wallet must retain their corresponding secret key. + public entry fun register(user: &signer, pk: vector) { + let pk = elgamal::new_pubkey_from_bytes(pk); + register_internal(user, pk.extract()); + } + + /// Sends a *public* `amount` of normal coins from `sender` to the `recipient`'s veiled balance. + /// + /// **WARNING:** This function *leaks* the transferred `amount`, since it is given as a public input. + public entry fun veil_to( + sender: &signer, recipient: address, amount: u32) acquires VeiledCoinMinter, VeiledCoinStore + { + let c = coin::withdraw(sender, cast_u32_to_u64_amount(amount)); + + let vc = veiled_mint_from_coin(c); + + veiled_deposit(recipient, vc) + } + + /// Like `veil_to`, except `owner` is both the sender and the recipient. + /// + /// This function can be used by the `owner` to initialize his veiled balance to a *public* value. + /// + /// **WARNING:** The initialized balance is *leaked*, since its initialized `amount` is public here. + public entry fun veil(owner: &signer, amount: u32) acquires VeiledCoinMinter, VeiledCoinStore { + veil_to(owner, signer::address_of(owner), amount) + } + + /// Takes a *public* `amount` of `VeiledCoin` coins from `sender`, unwraps them to a `coin::Coin`, + /// and sends them to `recipient`. Maintains secrecy of `sender`'s new balance. + /// + /// Requires a ZK range proof on the new balance of the sender, to ensure the sender has enough money to send. + /// No ZK range proof is necessary for the `amount`, which is given as a public `u32` value. + /// + /// **WARNING:** This *leaks* the transferred `amount`, since it is a public `u32` argument. + public entry fun unveil_to( + sender: &signer, + recipient: address, + amount: u32, + comm_new_balance: vector, + zkrp_new_balance: vector, + withdraw_subproof: vector) acquires VeiledCoinStore, VeiledCoinMinter + { + // Deserialize all the proofs into their proper Move structs + let comm_new_balance = pedersen::new_commitment_from_bytes(comm_new_balance); + assert!(comm_new_balance.is_some(), error::invalid_argument(EDESERIALIZATION_FAILED)); + + let sigma_proof = sigma_protos::deserialize_withdrawal_subproof(withdraw_subproof); + assert!(std::option::is_some(&sigma_proof), error::invalid_argument(EDESERIALIZATION_FAILED)); + + let comm_new_balance = comm_new_balance.extract(); + let zkrp_new_balance = bulletproofs::range_proof_from_bytes(zkrp_new_balance); + + let withdrawal_proof = WithdrawalProof { + sigma_proof: std::option::extract(&mut sigma_proof), + zkrp_new_balance, + }; + + // Do the actual work + unveil_to_internal(sender, recipient, amount, comm_new_balance, withdrawal_proof); + } + + /// Like `unveil_to`, except the `sender` is also the recipient. 
+ public entry fun unveil( + sender: &signer, + amount: u32, + comm_new_balance: vector, + zkrp_new_balance: vector, + withdraw_subproof: vector) acquires VeiledCoinStore, VeiledCoinMinter + { + unveil_to( + sender, + signer::address_of(sender), + amount, + comm_new_balance, + zkrp_new_balance, + withdraw_subproof + ) + } + + /// Sends a *veiled* amount from `sender` to `recipient`. After this call, the veiled balances of both the `sender` + /// and the `recipient` remain (or become) secret. + /// + /// The sent amount always remains secret; It is encrypted both under the sender's PK (in `withdraw_ct`) & under the + /// recipient's PK (in `deposit_ct`) using the *same* ElGamal randomness, so as to allow for efficiently updating both + /// the sender's & recipient's veiled balances. It is also committed under `comm_amount`, so as to allow for a ZK + /// range proof. + /// + /// Requires a `TransferProof`; i.e.: + /// 1. A range proof `zkrp_new_balance` on the new balance of the sender, to ensure the sender has enough money to + /// send. + /// 2. A range proof `zkrp_amount` on the transferred amount in `comm_amount`, to ensure the sender won't create + /// coins out of thin air. + /// 3. A $\Sigma$-protocol proof `transfer_subproof` which proves that 'withdraw_ct' encrypts the same veiled amount + /// as in 'deposit_ct' (with the same randomness) and as in `comm_amount`. + public entry fun fully_veiled_transfer( + sender: &signer, + recipient: address, + withdraw_ct: vector, + deposit_ct: vector, + comm_new_balance: vector, + comm_amount: vector, + zkrp_new_balance: vector, + zkrp_amount: vector, + transfer_subproof: vector) acquires VeiledCoinStore + { + // Deserialize everything into their proper Move structs + let veiled_withdraw_amount = elgamal::new_ciphertext_from_bytes(withdraw_ct); + assert!(veiled_withdraw_amount.is_some(), error::invalid_argument(EDESERIALIZATION_FAILED)); + + let veiled_deposit_amount = elgamal::new_ciphertext_from_bytes(deposit_ct); + assert!(veiled_deposit_amount.is_some(), error::invalid_argument(EDESERIALIZATION_FAILED)); + + let comm_new_balance = pedersen::new_commitment_from_bytes(comm_new_balance); + assert!(comm_new_balance.is_some(), error::invalid_argument(EDESERIALIZATION_FAILED)); + + let comm_amount = pedersen::new_commitment_from_bytes(comm_amount); + assert!(comm_amount.is_some(), error::invalid_argument(EDESERIALIZATION_FAILED)); + + let transfer_subproof = sigma_protos::deserialize_transfer_subproof(transfer_subproof); + assert!(std::option::is_some(&transfer_subproof), error::invalid_argument(EDESERIALIZATION_FAILED)); + + let transfer_proof = TransferProof { + zkrp_new_balance: bulletproofs::range_proof_from_bytes(zkrp_new_balance), + zkrp_amount: bulletproofs::range_proof_from_bytes(zkrp_amount), + sigma_proof: std::option::extract(&mut transfer_subproof) + }; + + // Do the actual work + fully_veiled_transfer_internal( + sender, + recipient, + veiled_withdraw_amount.extract(), + veiled_deposit_amount.extract(), + comm_new_balance.extract(), + comm_amount.extract(), + &transfer_proof, + ) + } + + // + // Public utility functions, for accessing state and converting u32 veiled coin amounts to u64 normal coin amounts. + // + + /// Clamps a `u64` normal public amount to a `u32` to-be-veiled amount. + /// + /// WARNING: Precision is lost here (see "Veiled coin amounts as truncated `u32`'s" in the top-level comments) + public fun clamp_u64_to_u32_amount(amount: u64): u32 { + // Removes the `NUM_MOST_SIGNIFICANT_BITS_REMOVED` most significant bits. 
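+        // Illustrative example of the intended clamping: a public `u64` amount of `0x0000_0001_0002_0003` keeps
+        // only its middle 32 bits, so the resulting veiled `u32` amount is `0x0001_0002` (the low 16 bits are
+        // truncated away).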
+ amount << NUM_MOST_SIGNIFICANT_BITS_REMOVED; + amount >> NUM_MOST_SIGNIFICANT_BITS_REMOVED; + + // Removes the other `32 - NUM_MOST_SIGNIFICANT_BITS_REMOVED` least significant bits. + amount = amount >> NUM_LEAST_SIGNIFICANT_BITS_REMOVED; + + // We are now left with a 32-bit value + (amount as u32) + } + + /// Casts a `u32` to-be-veiled amount to a `u64` normal public amount. No precision is lost here. + public fun cast_u32_to_u64_amount(amount: u32): u64 { + (amount as u64) << NUM_MOST_SIGNIFICANT_BITS_REMOVED + } + + /// Returns `true` if `addr` is registered to receive veiled coins of `CoinType`. + public fun has_veiled_coin_store(addr: address): bool { + exists>(addr) + } + + /// Returns the ElGamal encryption of the value of `coin`. + public fun veiled_amount(coin: &VeiledCoin): &elgamal::Ciphertext { + &coin.veiled_amount + } + + /// Returns the ElGamal encryption of the veiled balance of `owner` for the provided `CoinType`. + public fun veiled_balance(owner: address): elgamal::CompressedCiphertext acquires VeiledCoinStore { + assert!( + has_veiled_coin_store(owner), + error::not_found(EVEILED_COIN_STORE_NOT_PUBLISHED), + ); + + borrow_global>(owner).veiled_balance + } + + /// Given an address `addr`, returns the ElGamal encryption public key associated with that address + public fun encryption_public_key(addr: address): elgamal::CompressedPubkey acquires VeiledCoinStore { + assert!( + has_veiled_coin_store(addr), + error::not_found(EVEILED_COIN_STORE_NOT_PUBLISHED) + ); + + borrow_global_mut>(addr).pk + } + + /// Returns the total supply of veiled coins + public fun total_veiled_coins(): u64 acquires VeiledCoinMinter { + let rsrc_acc_addr = signer::address_of(&get_resource_account_signer()); + assert!(coin::is_account_registered(rsrc_acc_addr), EINTERNAL_ERROR); + + coin::balance(rsrc_acc_addr) + } + + /// Returns the domain separation tag (DST) for constructing Bulletproof-based range proofs in this module. + public fun get_veiled_coin_bulletproofs_dst(): vector { + VEILED_COIN_BULLETPROOFS_DST + } + + /// Returns the maximum # of bits used to represent a veiled coin amount. Might differ than the 64 bits used to + /// represent normal `aptos_framework::coin::Coin` values. + public fun get_max_bits_in_veiled_coin_value(): u64 { + MAX_BITS_IN_VEILED_COIN_VALUE + } + + // + // Public functions that modify veiled balances/accounts/coins + // (These could be made private, but we leave them public since they might be helpful to other contracts building + // efficiently on top of veiled coins.) + // + + /// Like `register`, but the public key has been parsed in a type-safe struct. + /// TODO: Do we want to require a PoK of the SK here? + public fun register_internal(user: &signer, pk: elgamal::CompressedPubkey) { + let account_addr = signer::address_of(user); + assert!( + !has_veiled_coin_store(account_addr), + error::already_exists(EVEILED_COIN_STORE_ALREADY_PUBLISHED), + ); + + // Note: There is no way to find an ElGamal SK such that the `(0_G, 0_G)` ciphertext below decrypts to a non-zero + // value. We'd need to have `(r * G, v * G + r * pk) = (0_G, 0_G)`, which implies `r = 0` for any choice of PK/SK. + // Thus, we must have `v * G = 0_G`, which implies `v = 0`. + + let coin_store = VeiledCoinStore { + veiled_balance: helpers::get_veiled_balance_zero_ciphertext(), + pk, + }; + move_to(user, coin_store); + } + + /// Deposits a veiled `coin` at address `to_addr`. 
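+    /// The deposited ciphertext is homomorphically added to the recipient's veiled balance, so no plaintext
+    /// amount is revealed; only a `Deposit` event (which carries no amount) is emitted.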
+ public fun veiled_deposit(to_addr: address, coin: VeiledCoin) acquires VeiledCoinStore { + assert!( + has_veiled_coin_store(to_addr), + error::not_found(EVEILED_COIN_STORE_NOT_PUBLISHED), + ); + + let veiled_coin_store = borrow_global_mut>(to_addr); + + // Fetch the veiled balance + let veiled_balance = elgamal::decompress_ciphertext(&veiled_coin_store.veiled_balance); + + // Add the veiled amount to the veiled balance (leverages the homomorphism of the encryption scheme) + elgamal::ciphertext_add_assign(&mut veiled_balance, &coin.veiled_amount); + + // Update the veiled balance + veiled_coin_store.veiled_balance = elgamal::compress_ciphertext(&veiled_balance); + + // Make sure the veiled coin is dropped so it cannot be double spent + let VeiledCoin { veiled_amount: _ } = coin; + + // Once successful, emit an event that a veiled deposit occurred. + event::emit( + Deposit { user: to_addr }, + ); + } + + /// Like `unveil_to`, except the proofs have been deserialized into type-safe structs. + public fun unveil_to_internal( + sender: &signer, + recipient: address, + amount: u32, + comm_new_balance: pedersen::Commitment, + withdrawal_proof: WithdrawalProof + ) acquires VeiledCoinStore, VeiledCoinMinter { + let addr = signer::address_of(sender); + assert!( + has_veiled_coin_store(addr), + error::not_found(EVEILED_COIN_STORE_NOT_PUBLISHED) + ); + + // Fetch the sender's ElGamal encryption public key + let sender_pk = encryption_public_key(addr); + + // Fetch the sender's veiled balance + let veiled_coin_store = borrow_global_mut>(addr); + let veiled_balance = elgamal::decompress_ciphertext(&veiled_coin_store.veiled_balance); + + // Create a (not-yet-secure) encryption of `amount`, since `amount` is a public argument here. + let scalar_amount = ristretto255::new_scalar_from_u32(amount); + + // Verify that `comm_new_balance` is a commitment to the remaing balance after withdrawing `amount`. + sigma_protos::verify_withdrawal_subproof( + &sender_pk, + &veiled_balance, + &comm_new_balance, + &scalar_amount, + &withdrawal_proof.sigma_proof); + + // Verify a ZK range proof on `comm_new_balance` (and thus on the remaining `veiled_balance`) + verify_range_proofs( + &comm_new_balance, + &withdrawal_proof.zkrp_new_balance, + &std::option::none(), + &std::option::none()); + + let veiled_amount = elgamal::new_ciphertext_no_randomness(&scalar_amount); + + // Withdraw `amount` from the veiled balance (leverages the homomorphism of the encryption scheme.) + elgamal::ciphertext_sub_assign(&mut veiled_balance, &veiled_amount); + + // Update the veiled balance to reflect the veiled withdrawal + veiled_coin_store.veiled_balance = elgamal::compress_ciphertext(&veiled_balance); + + // Emit event to indicate a veiled withdrawal occurred + event::emit( + Withdraw { user: addr }, + ); + + // Withdraw normal `Coin`'s from the resource account and deposit them in the recipient's + let c = coin::withdraw(&get_resource_account_signer(), cast_u32_to_u64_amount(amount)); + + coin::deposit(recipient, c); + } + + /// Like `fully_veiled_transfer`, except the ciphertext and proofs have been deserialized into type-safe structs. 
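+    /// Verifies the $\Sigma$-protocol and range proofs, homomorphically subtracts `veiled_withdraw_amount` from
+    /// the sender's veiled balance, and then deposits `veiled_deposit_amount` into the recipient's account via
+    /// `veiled_deposit`.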
+ public fun fully_veiled_transfer_internal( + sender: &signer, + recipient_addr: address, + veiled_withdraw_amount: elgamal::Ciphertext, + veiled_deposit_amount: elgamal::Ciphertext, + comm_new_balance: pedersen::Commitment, + comm_amount: pedersen::Commitment, + transfer_proof: &TransferProof) acquires VeiledCoinStore + { + let sender_addr = signer::address_of(sender); + + let sender_pk = encryption_public_key(sender_addr); + let recipient_pk = encryption_public_key(recipient_addr); + + // Note: The `encryption_public_key` call from above already asserts that `sender_addr` has a coin store. + let sender_veiled_coin_store = borrow_global_mut>(sender_addr); + + // Fetch the veiled balance of the veiled account + let veiled_balance = elgamal::decompress_ciphertext(&sender_veiled_coin_store.veiled_balance); + + // Checks that `veiled_withdraw_amount` and `veiled_deposit_amount` encrypt the same amount of coins, under the + // sender and recipient's PKs. Also checks this amount is committed inside `comm_amount`. Also, checks that the + // new balance encrypted in `veiled_balance` is committed in `comm_new_balance`. + sigma_protos::verify_transfer_subproof( + &sender_pk, + &recipient_pk, + &veiled_withdraw_amount, + &veiled_deposit_amount, + &comm_amount, + &comm_new_balance, + &veiled_balance, + &transfer_proof.sigma_proof); + + // Update the account's veiled balance by homomorphically subtracting the veiled amount from the veiled balance. + elgamal::ciphertext_sub_assign(&mut veiled_balance, &veiled_withdraw_amount); + + + // Verifies range proofs on the transferred amount and the remaining balance + verify_range_proofs( + &comm_new_balance, + &transfer_proof.zkrp_new_balance, + &std::option::some(comm_amount), + &std::option::some(transfer_proof.zkrp_amount)); + + // Update the veiled balance to reflect the veiled withdrawal + sender_veiled_coin_store.veiled_balance = elgamal::compress_ciphertext(&veiled_balance); + + // Once everything succeeds, emit an event to indicate a veiled withdrawal occurred + event::emit( + Withdraw { user: sender_addr }, + ); + + // Create a new veiled coin for the recipient. + let vc = VeiledCoin { veiled_amount: veiled_deposit_amount }; + + // Deposits `veiled_deposit_amount` into the recipient's account + // (Note, if this aborts, the whole transaction aborts, so we do not need to worry about atomicity.) + veiled_deposit(recipient_addr, vc); + } + + /// Verifies range proofs on the remaining balance of an account committed in `comm_new_balance` and, optionally, on + /// the transferred amount committed inside `comm_amount`. + public fun verify_range_proofs( + comm_new_balance: &pedersen::Commitment, + zkrp_new_balance: &RangeProof, + comm_amount: &Option, + zkrp_amount: &Option + ) { + // Let `amount` denote the amount committed in `comm_amount` and `new_bal` the balance committed in `comm_new_balance`. + // + // This function checks if it is possible to withdraw a veiled `amount` from a veiled `bal`, obtaining a new + // veiled balance `new_bal = bal - amount`. This function is used to maintains a key safety invariant throughout + // the veild coin code: i.e., that every account has `new_bal \in [0, 2^{32})`. + // + // This invariant is enforced as follows: + // + // 1. We assume (by the invariant) that `bal \in [0, 2^{32})`. + // + // 2. We verify a ZK range proof that `amount \in [0, 2^{32})`. Otherwise, a sender could set `amount = p-1` + // where `p` is the order of the scalar field, which would give `new_bal = bal - (p-1) mod p = bal + 1`. 
+ // Therefore, a malicious spender could create coins out of thin air for themselves. + // + // 3. We verify a ZK range proof that `new_bal \in [0, 2^{32})`. Otherwise, a sender could set `amount = bal + 1`, + // which would satisfy condition (2) from above but would give `new_bal = bal - (bal + 1) = -1`. Therefore, + // a malicious spender could spend more coins than they have. + // + // Altogether, these checks ensure that `bal - amount >= 0` (as integers) and therefore that `bal >= amount` + // (again, as integers). + // + // When the caller of this function created the `comm_amount` from a public `u32` value, it is guaranteed that + // condition (2) from above holds, so no range proof is necessary. This happens when withdrawing a public + // amount from a veiled balance via `unveil_to` or `unveil`. + + // Checks that the remaining balance is >= 0; i.e., range condition (3) + assert!( + bulletproofs::verify_range_proof_pedersen( + comm_new_balance, + zkrp_new_balance, + MAX_BITS_IN_VEILED_COIN_VALUE, VEILED_COIN_BULLETPROOFS_DST + ), + error::out_of_range(ERANGE_PROOF_VERIFICATION_FAILED) + ); + + // Checks that the transferred amount is in range (when this amount did not originate from a public amount); i.e., range condition (2) + if (zkrp_amount.is_some()) { + assert!( + bulletproofs::verify_range_proof_pedersen( + comm_amount.borrow(), + zkrp_amount.borrow(), + MAX_BITS_IN_VEILED_COIN_VALUE, VEILED_COIN_BULLETPROOFS_DST + ), + error::out_of_range(ERANGE_PROOF_VERIFICATION_FAILED) + ); + }; + } + + // + // Private functions. + // + + /// Returns a signer for the resource account storing all the normal coins that have been veiled. + fun get_resource_account_signer(): signer acquires VeiledCoinMinter { + account::create_signer_with_capability(&borrow_global(@aptos_experimental).signer_cap) + } + + /// Mints a veiled coin from a normal coin, shelving the normal coin into the resource account's coin store. + /// + /// **WARNING:** Fundamentally, there is no way to hide the value of the coin being minted here. + fun veiled_mint_from_coin(c: Coin): VeiledCoin acquires VeiledCoinMinter { + // If there is no `coin::CoinStore` in the resource account, create one. + let rsrc_acc_signer = get_resource_account_signer(); + let rsrc_acc_addr = signer::address_of(&rsrc_acc_signer); + if (!coin::is_account_registered(rsrc_acc_addr)) { + coin::register(&rsrc_acc_signer); + }; + + // Move the normal coin into the coin store, so we can mint a veiled coin. + // (There is no other way to drop a normal coin, for safety reasons, so moving it into a coin store is + // the only option.) + let value_u64 = coin::value(&c); + let value_u32 = clamp_u64_to_u32_amount(value_u64); + + // Paranoid check: assert that the u64 coin value had only its middle 32 bits set (should be the case + // because the caller should have withdrawn a u32 amount, but enforcing this here anyway). + assert!(cast_u32_to_u64_amount(value_u32) == value_u64, error::internal(EINTERNAL_ERROR)); + + // Deposit a normal coin into the resource account... + coin::deposit(rsrc_acc_addr, c); + + // ...and mint a veiled coin, which is backed by the normal coin + VeiledCoin { + veiled_amount: helpers::public_amount_to_veiled_balance(value_u32) + } + } + + // + // Test-only functions + // + + #[test_only] + /// Returns true if the balance at address `owner` equals `value`. + /// Requires the ElGamal encryption randomness `r` and public key `pk` as auxiliary inputs. 
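+    /// Specifically, it re-encrypts `value` under `pk` with randomness `r` and checks that the resulting
+    /// ciphertext equals the veiled balance stored at `owner`.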
+ public fun verify_opened_balance( + owner: address, value: u32, r: &Scalar, pk: &elgamal::CompressedPubkey): bool acquires VeiledCoinStore + { + // compute the expected encrypted balance + let value = ristretto255::new_scalar_from_u32(value); + let expected_ct = elgamal::new_ciphertext_with_basepoint(&value, r, pk); + + // get the actual encrypted balance + let actual_ct = elgamal::decompress_ciphertext(&veiled_balance(owner)); + + elgamal::ciphertext_equals(&actual_ct, &expected_ct) + } + + #[test_only] + /// So we can call this from `veiled_coin_tests.move`. + public fun init_module_for_testing(deployer: &signer) { + init_module(deployer) + } + + // + // Unit tests are available in `veiled_coin_tests.move`. + // +} diff --git a/aptos-move/framework/aptos-experimental/tests/confidential_asset/confidential_asset_tests.move b/aptos-move/framework/aptos-experimental/tests/confidential_asset/confidential_asset_tests.move new file mode 100644 index 0000000000000..1a2c0ed24d90f --- /dev/null +++ b/aptos-move/framework/aptos-experimental/tests/confidential_asset/confidential_asset_tests.move @@ -0,0 +1,685 @@ +#[test_only] +module aptos_experimental::confidential_asset_tests { + use std::features; + use std::option; + use std::signer; + use std::string::utf8; + use aptos_std::ristretto255::Scalar; + use aptos_framework::account; + use aptos_framework::chain_id; + use aptos_framework::coin; + use aptos_framework::fungible_asset::{Self, Metadata}; + use aptos_framework::object::{Self, Object}; + use aptos_framework::primary_fungible_store; + + use aptos_experimental::confidential_asset; + use aptos_experimental::confidential_balance; + use aptos_experimental::confidential_proof; + use aptos_experimental::ristretto255_twisted_elgamal::{Self as twisted_elgamal, generate_twisted_elgamal_keypair}; + + struct MockCoin {} + + fun withdraw( + sender: &signer, + sender_dk: &Scalar, + token: Object, + to: address, + amount: u64, + new_amount: u128) + { + let from = signer::address_of(sender); + let sender_ek = confidential_asset::encryption_key(from, token); + let current_balance = confidential_balance::decompress_balance( + &confidential_asset::actual_balance(from, token) + ); + + let (proof, new_balance) = confidential_proof::prove_withdrawal( + sender_dk, + &sender_ek, + amount, + new_amount, + ¤t_balance + ); + + let new_balance = confidential_balance::balance_to_bytes(&new_balance); + let (sigma_proof, zkrp_new_balance) = confidential_proof::serialize_withdrawal_proof(&proof); + + if (signer::address_of(sender) == to) { + confidential_asset::withdraw(sender, token, amount, new_balance, zkrp_new_balance, sigma_proof); + } else { + confidential_asset::withdraw_to(sender, token, to, amount, new_balance, zkrp_new_balance, sigma_proof); + } + } + + fun transfer( + sender: &signer, + sender_dk: &Scalar, + token: Object, + to: address, + amount: u64, + new_amount: u128) + { + let from = signer::address_of(sender); + let sender_ek = confidential_asset::encryption_key(from, token); + let recipient_ek = confidential_asset::encryption_key(to, token); + let current_balance = confidential_balance::decompress_balance( + &confidential_asset::actual_balance(from, token) + ); + + let ( + proof, + new_balance, + sender_amount, + recipient_amount, + _ + ) = confidential_proof::prove_transfer( + sender_dk, + &sender_ek, + &recipient_ek, + amount, + new_amount, + ¤t_balance, + &vector[], + ); + + let (sigma_proof, zkrp_new_balance, zkrp_transfer_amount) = confidential_proof::serialize_transfer_proof( + &proof + ); + + 
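+        // Submit the transfer on-chain: the sender's new (encrypted) balance, the transfer amount encrypted under
+        // both the sender's and the recipient's keys, and the serialized range/sigma proofs. The two empty byte
+        // vectors below are the (absent) auditor keys and auditor amounts for this no-auditor transfer.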
confidential_asset::confidential_transfer( + sender, + token, + to, + confidential_balance::balance_to_bytes(&new_balance), + confidential_balance::balance_to_bytes(&sender_amount), + confidential_balance::balance_to_bytes(&recipient_amount), + b"", + b"", + zkrp_new_balance, + zkrp_transfer_amount, + sigma_proof + ); + } + + fun audit_transfer( + sender: &signer, + sender_dk: &Scalar, + token: Object, + to: address, + amount: u64, + new_amount: u128, + auditor_eks: &vector): vector + { + let from = signer::address_of(sender); + let sender_ek = confidential_asset::encryption_key(from, token); + let recipient_ek = confidential_asset::encryption_key(to, token); + let current_balance = confidential_balance::decompress_balance( + &confidential_asset::actual_balance(from, token) + ); + + let ( + proof, + new_balance, + sender_amount, + recipient_amount, + auditor_amounts + ) = confidential_proof::prove_transfer( + sender_dk, + &sender_ek, + &recipient_ek, + amount, + new_amount, + ¤t_balance, + auditor_eks, + ); + + let (sigma_proof, zkrp_new_balance, zkrp_transfer_amount) = confidential_proof::serialize_transfer_proof( + &proof + ); + + confidential_asset::confidential_transfer( + sender, + token, + to, + confidential_balance::balance_to_bytes(&new_balance), + confidential_balance::balance_to_bytes(&sender_amount), + confidential_balance::balance_to_bytes(&recipient_amount), + confidential_asset::serialize_auditor_eks(auditor_eks), + confidential_asset::serialize_auditor_amounts(&auditor_amounts), + zkrp_new_balance, + zkrp_transfer_amount, + sigma_proof + ); + + auditor_amounts + } + + fun rotate( + sender: &signer, + sender_dk: &Scalar, + token: Object, + new_dk: &Scalar, + new_ek: &twisted_elgamal::CompressedPubkey, + amount: u128) + { + let from = signer::address_of(sender); + let sender_ek = confidential_asset::encryption_key(from, token); + let current_balance = confidential_balance::decompress_balance( + &confidential_asset::actual_balance(from, token) + ); + + let (proof, new_balance) = confidential_proof::prove_rotation( + sender_dk, + new_dk, + &sender_ek, + new_ek, + amount, + ¤t_balance + ); + + let (sigma_proof, zkrp_new_balance) = confidential_proof::serialize_rotation_proof(&proof); + + confidential_asset::rotate_encryption_key( + sender, + token, + twisted_elgamal::pubkey_to_bytes(new_ek), + confidential_balance::balance_to_bytes(&new_balance), + zkrp_new_balance, + sigma_proof + ); + } + + fun normalize( + sender: &signer, + sender_dk: &Scalar, + token: Object, + amount: u128) + { + let from = signer::address_of(sender); + let sender_ek = confidential_asset::encryption_key(from, token); + let current_balance = confidential_balance::decompress_balance( + &confidential_asset::actual_balance(from, token) + ); + + let (proof, new_balance) = confidential_proof::prove_normalization( + sender_dk, + &sender_ek, + amount, + ¤t_balance); + + let (sigma_proof, zkrp_new_balance) = confidential_proof::serialize_normalization_proof(&proof); + + confidential_asset::normalize( + sender, + token, + confidential_balance::balance_to_bytes(&new_balance), + zkrp_new_balance, + sigma_proof + ); + } + + public fun set_up_for_confidential_asset_test( + confidential_asset: &signer, + aptos_fx: &signer, + fa: &signer, + sender: &signer, + recipient: &signer, + sender_amount: u64, + recipient_amount: u64): Object + { + chain_id::initialize_for_test(aptos_fx, 4); + + let ctor_ref = &object::create_sticky_object(signer::address_of(fa)); + + 
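+        // Create a primary-store-enabled fungible asset ("MockToken") under the sticky object; the mint ref
+        // generated below is used to fund the sender's and recipient's primary stores for the test.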
primary_fungible_store::create_primary_store_enabled_fungible_asset( + ctor_ref, + option::none(), + utf8(b"MockToken"), + utf8(b"MT"), + 18, + utf8(b"https://"), + utf8(b"https://"), + ); + + let mint_ref = fungible_asset::generate_mint_ref(ctor_ref); + + assert!(signer::address_of(aptos_fx) != signer::address_of(sender), 1); + assert!(signer::address_of(aptos_fx) != signer::address_of(recipient), 2); + + confidential_asset::init_module_for_testing(confidential_asset); + + features::change_feature_flags_for_testing(aptos_fx, vector[features::get_bulletproofs_feature()], vector[]); + + let token = object::object_from_constructor_ref(ctor_ref); + + let sender_store = primary_fungible_store::ensure_primary_store_exists(signer::address_of(sender), token); + fungible_asset::mint_to(&mint_ref, sender_store, sender_amount); + + let recipient_store = primary_fungible_store::ensure_primary_store_exists(signer::address_of(recipient), token); + fungible_asset::mint_to(&mint_ref, recipient_store, recipient_amount); + + token + } + + #[test( + confidential_asset = @aptos_experimental, + aptos_fx = @aptos_framework, + fa = @0xfa, + alice = @0xa1, + bob = @0xb0 + )] + fun success_deposit_test( + confidential_asset: signer, + aptos_fx: signer, + fa: signer, + alice: signer, + bob: signer) + { + let token = set_up_for_confidential_asset_test(&confidential_asset, &aptos_fx, &fa, &alice, &bob, 500, 500); + + let alice_addr = signer::address_of(&alice); + let bob_addr = signer::address_of(&bob); + + let (alice_dk, alice_ek) = generate_twisted_elgamal_keypair(); + let (bob_dk, bob_ek) = generate_twisted_elgamal_keypair(); + + confidential_asset::register(&alice, token, twisted_elgamal::pubkey_to_bytes(&alice_ek)); + confidential_asset::register(&bob, token, twisted_elgamal::pubkey_to_bytes(&bob_ek)); + + confidential_asset::deposit(&alice, token, 100); + confidential_asset::deposit_to(&alice, token, bob_addr, 150); + + assert!(primary_fungible_store::balance(alice_addr, token) == 250, 1); + assert!(confidential_asset::verify_pending_balance(alice_addr, token, &alice_dk, 100), 1); + assert!(confidential_asset::verify_pending_balance(bob_addr, token, &bob_dk, 150), 1); + } + + #[test( + confidential_asset = @aptos_experimental, + aptos_fx = @aptos_framework, + fa = @0xfa, + alice = @0xa1, + bob = @0xb0 + )] + fun success_withdraw_test( + confidential_asset: signer, + aptos_fx: signer, + fa: signer, + alice: signer, + bob: signer) + { + let token = set_up_for_confidential_asset_test(&confidential_asset, &aptos_fx, &fa, &alice, &bob, 500, 500); + + let alice_addr = signer::address_of(&alice); + let bob_addr = signer::address_of(&bob); + + let (alice_dk, alice_ek) = generate_twisted_elgamal_keypair(); + + confidential_asset::register(&alice, token, twisted_elgamal::pubkey_to_bytes(&alice_ek)); + + confidential_asset::deposit(&alice, token, 200); + confidential_asset::rollover_pending_balance(&alice, token); + + withdraw(&alice, &alice_dk, token, bob_addr, 50, 150); + + assert!(primary_fungible_store::balance(bob_addr, token) == 550, 1); + assert!(confidential_asset::verify_actual_balance(alice_addr, token, &alice_dk, 150), 1); + + withdraw(&alice, &alice_dk, token, alice_addr, 50, 100); + + assert!(primary_fungible_store::balance(alice_addr, token) == 350, 1); + assert!(confidential_asset::verify_actual_balance(alice_addr, token, &alice_dk, 100), 1); + } + + #[test( + confidential_asset = @aptos_experimental, + aptos_fx = @aptos_framework, + fa = @0xfa, + alice = @0xa1, + bob = @0xb0 + )] + fun 
success_transfer_test( + confidential_asset: signer, + aptos_fx: signer, + fa: signer, + alice: signer, + bob: signer) + { + let token = set_up_for_confidential_asset_test(&confidential_asset, &aptos_fx, &fa, &alice, &bob, 500, 500); + + let alice_addr = signer::address_of(&alice); + let bob_addr = signer::address_of(&bob); + + let (alice_dk, alice_ek) = generate_twisted_elgamal_keypair(); + let (bob_dk, bob_ek) = generate_twisted_elgamal_keypair(); + + confidential_asset::register(&alice, token, twisted_elgamal::pubkey_to_bytes(&alice_ek)); + confidential_asset::register(&bob, token, twisted_elgamal::pubkey_to_bytes(&bob_ek)); + + confidential_asset::deposit(&alice, token, 200); + confidential_asset::rollover_pending_balance(&alice, token); + + transfer(&alice, &alice_dk, token, bob_addr, 100, 100); + + assert!(confidential_asset::verify_actual_balance(alice_addr, token, &alice_dk, 100), 1); + assert!(confidential_asset::verify_pending_balance(bob_addr, token, &bob_dk, 100), 1); + + transfer(&alice, &alice_dk, token, alice_addr, 100, 0); + + assert!(confidential_asset::verify_actual_balance(alice_addr, token, &alice_dk, 0), 1); + assert!(confidential_asset::verify_pending_balance(alice_addr, token, &alice_dk, 100), 1); + } + + #[test( + confidential_asset = @aptos_experimental, + aptos_fx = @aptos_framework, + fa = @0xfa, + alice = @0xa1, + bob = @0xb0 + )] + fun success_audit_transfer_test( + confidential_asset: signer, + aptos_fx: signer, + fa: signer, + alice: signer, + bob: signer) + { + let token = set_up_for_confidential_asset_test(&confidential_asset, &aptos_fx, &fa, &alice, &bob, 500, 500); + + let alice_addr = signer::address_of(&alice); + let bob_addr = signer::address_of(&bob); + + let (alice_dk, alice_ek) = generate_twisted_elgamal_keypair(); + let (bob_dk, bob_ek) = generate_twisted_elgamal_keypair(); + let (auditor1_dk, auditor1_ek) = generate_twisted_elgamal_keypair(); + let (auditor2_dk, auditor2_ek) = generate_twisted_elgamal_keypair(); + + confidential_asset::set_auditor( + &aptos_fx, + token, + twisted_elgamal::pubkey_to_bytes(&auditor1_ek)); + + confidential_asset::register(&alice, token, twisted_elgamal::pubkey_to_bytes(&alice_ek)); + confidential_asset::register(&bob, token, twisted_elgamal::pubkey_to_bytes(&bob_ek)); + + confidential_asset::deposit(&alice, token, 200); + confidential_asset::rollover_pending_balance(&alice, token); + + let auditor_amounts = audit_transfer( + &alice, + &alice_dk, + token, + bob_addr, + 100, + 100, + &vector[auditor1_ek, auditor2_ek]); + + assert!(confidential_asset::verify_actual_balance(alice_addr, token, &alice_dk, 100), 1); + assert!(confidential_asset::verify_pending_balance(bob_addr, token, &bob_dk, 100), 1); + + assert!(confidential_balance::verify_pending_balance(&auditor_amounts[0], &auditor1_dk, 100), 1); + assert!(confidential_balance::verify_pending_balance(&auditor_amounts[1], &auditor2_dk, 100), 1); + } + + #[test( + confidential_asset = @aptos_experimental, + aptos_fx = @aptos_framework, + fa = @0xfa, + alice = @0xa1, + bob = @0xb0 + )] + #[expected_failure(abort_code = 0x010006, location = confidential_asset)] + fun fail_audit_transfer_if_wrong_auditor_list( + confidential_asset: signer, + aptos_fx: signer, + fa: signer, + alice: signer, + bob: signer) + { + let token = set_up_for_confidential_asset_test(&confidential_asset, &aptos_fx, &fa, &alice, &bob, 500, 500); + + let bob_addr = signer::address_of(&bob); + + let (alice_dk, alice_ek) = generate_twisted_elgamal_keypair(); + let (_, bob_ek) = 
generate_twisted_elgamal_keypair(); + let (_, auditor1_ek) = generate_twisted_elgamal_keypair(); + let (_, auditor2_ek) = generate_twisted_elgamal_keypair(); + + confidential_asset::set_auditor( + &aptos_fx, + token, + twisted_elgamal::pubkey_to_bytes(&auditor1_ek)); + + confidential_asset::register(&alice, token, twisted_elgamal::pubkey_to_bytes(&alice_ek)); + confidential_asset::register(&bob, token, twisted_elgamal::pubkey_to_bytes(&bob_ek)); + + confidential_asset::deposit(&alice, token, 200); + confidential_asset::rollover_pending_balance(&alice, token); + + // This fails because the `auditor1` is set for `token`, + // so each transfer must include `auditor1` in the auditor list as the FIRST element. + // Please, see `confidential_asset::validate_auditors` for more details. + audit_transfer( + &alice, + &alice_dk, + token, + bob_addr, + 100, + 100, + &vector[auditor2_ek, auditor1_ek]); + } + + #[test( + confidential_asset = @aptos_experimental, + aptos_fx = @aptos_framework, + fa = @0xfa, + alice = @0xa1, + bob = @0xb0 + )] + fun success_rotate( + confidential_asset: signer, + aptos_fx: signer, + fa: signer, + alice: signer, + bob: signer) + { + let token = set_up_for_confidential_asset_test(&confidential_asset, &aptos_fx, &fa, &alice, &bob, 500, 500); + + let alice_addr = signer::address_of(&alice); + let bob_addr = signer::address_of(&bob); + + let (alice_dk, alice_ek) = generate_twisted_elgamal_keypair(); + + confidential_asset::register(&alice, token, twisted_elgamal::pubkey_to_bytes(&alice_ek)); + + confidential_asset::deposit(&alice, token, 200); + confidential_asset::rollover_pending_balance(&alice, token); + + withdraw(&alice, &alice_dk, token, bob_addr, 50, 150); + + let (new_alice_dk, new_alice_ek) = generate_twisted_elgamal_keypair(); + + rotate(&alice, &alice_dk, token, &new_alice_dk, &new_alice_ek, 150); + + assert!(confidential_asset::encryption_key(alice_addr, token) == new_alice_ek, 1); + assert!(confidential_asset::verify_actual_balance(alice_addr, token, &new_alice_dk, 150), 1); + } + + #[test( + confidential_asset = @aptos_experimental, + aptos_fx = @aptos_framework, + fa = @0xfa, + alice = @0xa1, + bob = @0xb0 + )] + fun success_normalize( + confidential_asset: signer, + aptos_fx: signer, + fa: signer, + alice: signer, + bob: signer) + { + let max_chunk_value = 1 << 16 - 1; + let token = set_up_for_confidential_asset_test( + &confidential_asset, + &aptos_fx, + &fa, + &alice, + &bob, + max_chunk_value, + max_chunk_value + ); + + let alice_addr = signer::address_of(&alice); + + let (alice_dk, alice_ek) = generate_twisted_elgamal_keypair(); + + confidential_asset::register(&alice, token, twisted_elgamal::pubkey_to_bytes(&alice_ek)); + + confidential_asset::deposit(&alice, token, max_chunk_value); + confidential_asset::deposit_to(&bob, token, alice_addr, max_chunk_value); + + confidential_asset::rollover_pending_balance(&alice, token); + + assert!(!confidential_asset::is_normalized(alice_addr, token)); + assert!( + !confidential_asset::verify_actual_balance(alice_addr, token, &alice_dk, (2 * max_chunk_value as u128)), + 1 + ); + + normalize(&alice, &alice_dk, token, (2 * max_chunk_value as u128)); + + assert!(confidential_asset::is_normalized(alice_addr, token)); + assert!( + confidential_asset::verify_actual_balance(alice_addr, token, &alice_dk, (2 * max_chunk_value as u128)), 1); + } + + #[test( + confidential_asset = @aptos_experimental, + aptos_fx = @aptos_framework, + fa = @0xfa, + alice = @0xa1 + )] + #[expected_failure(abort_code = 0x01000D, location = 
confidential_asset)] + fun fail_register_if_token_disallowed( + confidential_asset: signer, + aptos_fx: signer, + fa: signer, + alice: signer) + { + let token = set_up_for_confidential_asset_test(&confidential_asset, &aptos_fx, &fa, &alice, &alice, 500, 500); + + confidential_asset::enable_allow_list(&aptos_fx); + + let (_, alice_ek) = generate_twisted_elgamal_keypair(); + + confidential_asset::register(&alice, token, twisted_elgamal::pubkey_to_bytes(&alice_ek)); + } + + #[test( + confidential_asset = @aptos_experimental, + aptos_fx = @aptos_framework, + fa = @0xfa, + alice = @0xa1 + )] + fun success_register_if_token_allowed( + confidential_asset: signer, + aptos_fx: signer, + fa: signer, + alice: signer) + { + let token = set_up_for_confidential_asset_test(&confidential_asset, &aptos_fx, &fa, &alice, &alice, 500, 500); + + confidential_asset::enable_allow_list(&aptos_fx); + confidential_asset::enable_token(&aptos_fx, token); + + let (_, alice_ek) = generate_twisted_elgamal_keypair(); + + confidential_asset::register(&alice, token, twisted_elgamal::pubkey_to_bytes(&alice_ek)); + } + + #[test( + confidential_asset = @aptos_experimental, + aptos_fx = @aptos_framework, + alice = @0xa1 + )] + fun fail_deposit_with_coins_if_insufficient_amount( + confidential_asset: signer, + aptos_fx: signer, + alice: signer) + { + chain_id::initialize_for_test(&aptos_fx, 4); + confidential_asset::init_module_for_testing(&confidential_asset); + coin::create_coin_conversion_map(&aptos_fx); + + let alice_addr = signer::address_of(&alice); + + let (burn_cap, freeze_cap, mint_cap) = coin::initialize( + &confidential_asset, utf8(b"MockCoin"), utf8(b"MC"), 0, false); + + let coin_amount = coin::mint(100, &mint_cap); + coin::destroy_burn_cap(burn_cap); + coin::destroy_freeze_cap(freeze_cap); + coin::destroy_mint_cap(mint_cap); + + account::create_account_if_does_not_exist(alice_addr); + coin::register(&alice); + coin::deposit(alice_addr, coin_amount); + + coin::create_pairing(&aptos_fx); + + let token = coin::paired_metadata().extract(); + + let (_, alice_ek) = generate_twisted_elgamal_keypair(); + + confidential_asset::register(&alice, token, twisted_elgamal::pubkey_to_bytes(&alice_ek)); + confidential_asset::deposit(&alice, token, 100); + } + + #[test( + confidential_asset = @aptos_experimental, + aptos_fx = @aptos_framework, + alice = @0xa1, + )] + fun success_deposit_with_coins( + confidential_asset: signer, + aptos_fx: signer, + alice: signer) + { + chain_id::initialize_for_test(&aptos_fx, 4); + confidential_asset::init_module_for_testing(&confidential_asset); + coin::create_coin_conversion_map(&aptos_fx); + + let alice_addr = signer::address_of(&alice); + + let (burn_cap, freeze_cap, mint_cap) = coin::initialize( + &confidential_asset, utf8(b"MockCoin"), utf8(b"MC"), 0, false); + + let coin_amount = coin::mint(100, &mint_cap); + coin::destroy_burn_cap(burn_cap); + coin::destroy_freeze_cap(freeze_cap); + coin::destroy_mint_cap(mint_cap); + + account::create_account_if_does_not_exist(alice_addr); + coin::register(&alice); + coin::deposit(alice_addr, coin_amount); + + coin::create_pairing(&aptos_fx); + + let token = coin::paired_metadata().extract(); + + let (alice_dk, alice_ek) = generate_twisted_elgamal_keypair(); + + confidential_asset::register(&alice, token, twisted_elgamal::pubkey_to_bytes(&alice_ek)); + + assert!(coin::balance(alice_addr) == 100, 1); + assert!(primary_fungible_store::balance(alice_addr, token) == 100, 1); + assert!(confidential_asset::verify_pending_balance(alice_addr, token, &alice_dk, 
0), 1); + + confidential_asset::deposit_coins(&alice, 50); + + assert!(coin::balance(alice_addr) == 50, 1); + assert!(primary_fungible_store::balance(alice_addr, token) == 50, 1); + assert!(confidential_asset::verify_pending_balance(alice_addr, token, &alice_dk, 50), 1); + } +} diff --git a/aptos-move/framework/aptos-experimental/tests/confidential_asset/confidential_proof_tests.move b/aptos-move/framework/aptos-experimental/tests/confidential_asset/confidential_proof_tests.move new file mode 100644 index 0000000000000..9a5ed516a715d --- /dev/null +++ b/aptos-move/framework/aptos-experimental/tests/confidential_asset/confidential_proof_tests.move @@ -0,0 +1,569 @@ +#[test_only] +module aptos_experimental::confidential_proof_tests { + use aptos_experimental::confidential_balance; + use aptos_experimental::confidential_proof; + use aptos_experimental::ristretto255_twisted_elgamal::{Self as twisted_elgamal, generate_twisted_elgamal_keypair}; + + struct WithdrawParameters has drop { + ek: twisted_elgamal::CompressedPubkey, + amount: u64, + current_balance: confidential_balance::ConfidentialBalance, + new_balance: confidential_balance::ConfidentialBalance, + proof: confidential_proof::WithdrawalProof, + } + + struct TransferParameters has drop { + sender_ek: twisted_elgamal::CompressedPubkey, + recipient_ek: twisted_elgamal::CompressedPubkey, + amount: u64, + new_amount: u128, + current_balance: confidential_balance::ConfidentialBalance, + new_balance: confidential_balance::ConfidentialBalance, + sender_amount: confidential_balance::ConfidentialBalance, + recipient_amount: confidential_balance::ConfidentialBalance, + auditor_eks: vector, + auditor_amounts: vector, + proof: confidential_proof::TransferProof, + } + + struct RotationParameters has drop { + current_ek: twisted_elgamal::CompressedPubkey, + new_ek: twisted_elgamal::CompressedPubkey, + amount: u128, + current_balance: confidential_balance::ConfidentialBalance, + new_balance: confidential_balance::ConfidentialBalance, + proof: confidential_proof::RotationProof, + } + + struct NormalizationParameters has drop { + ek: twisted_elgamal::CompressedPubkey, + amount: u128, + current_balance: confidential_balance::ConfidentialBalance, + new_balance: confidential_balance::ConfidentialBalance, + proof: confidential_proof::NormalizationProof, + } + + fun withdraw(): WithdrawParameters { + withdraw_with_params(150, 100, 50) + } + + fun withdraw_with_params(current_amount: u128, new_amount: u128, amount: u64): WithdrawParameters { + let (dk, ek) = generate_twisted_elgamal_keypair(); + + let current_balance_r = confidential_balance::generate_balance_randomness(); + + let current_balance = confidential_balance::new_actual_balance_from_u128( + current_amount, + ¤t_balance_r, + &ek + ); + + let ( + proof, + new_balance + ) = confidential_proof::prove_withdrawal( + &dk, + &ek, + amount, + new_amount, + ¤t_balance, + ); + + WithdrawParameters { + ek, + amount, + current_balance, + new_balance, + proof, + } + } + + fun transfer(): TransferParameters { + transfer_with_parameters(150, 100, 50) + } + + fun transfer_with_parameters(current_amount: u128, new_amount: u128, amount: u64): TransferParameters { + let (sender_dk, sender_ek) = generate_twisted_elgamal_keypair(); + let (_, recipient_ek) = generate_twisted_elgamal_keypair(); + + let current_balance_r = confidential_balance::generate_balance_randomness(); + let current_balance = confidential_balance::new_actual_balance_from_u128( + current_amount, + ¤t_balance_r, + &sender_ek + ); + + let (_, 
auditor_ek) = generate_twisted_elgamal_keypair(); + + let auditor_eks = vector[auditor_ek]; + + let ( + proof, + new_balance, + sender_amount, + recipient_amount, + auditor_amounts, + ) = confidential_proof::prove_transfer( + &sender_dk, + &sender_ek, + &recipient_ek, + amount, + new_amount, + ¤t_balance, + &auditor_eks, + ); + + TransferParameters { + sender_ek, + recipient_ek, + amount, + new_amount, + current_balance, + new_balance, + sender_amount, + recipient_amount, + auditor_eks, + auditor_amounts, + proof, + } + } + + fun rotate(): RotationParameters { + let (current_dk, current_ek) = generate_twisted_elgamal_keypair(); + let (new_dk, new_ek) = generate_twisted_elgamal_keypair(); + + let amount = 150; + + let current_balance_r = confidential_balance::generate_balance_randomness(); + let current_balance = confidential_balance::new_actual_balance_from_u128( + amount, + ¤t_balance_r, + ¤t_ek + ); + + let ( + proof, + new_balance, + ) = confidential_proof::prove_rotation( + ¤t_dk, + &new_dk, + ¤t_ek, + &new_ek, + amount, + ¤t_balance + ); + + RotationParameters { + current_ek, + new_ek, + amount, + current_balance, + new_balance, + proof, + } + } + + fun normalize(): NormalizationParameters { + let (dk, ek) = generate_twisted_elgamal_keypair(); + + let amount = 1 << 16; + + let current_balance_r = confidential_balance::generate_balance_randomness(); + let current_balance = confidential_balance::new_actual_balance_from_u128(amount / 2, ¤t_balance_r, &ek); + confidential_balance::add_balances_mut( + &mut current_balance, + &confidential_balance::new_actual_balance_from_u128(amount / 2, ¤t_balance_r, &ek)); + + let ( + proof, + new_balance + ) = confidential_proof::prove_normalization( + &dk, + &ek, + amount, + ¤t_balance, + ); + + NormalizationParameters { + ek, + amount, + current_balance, + new_balance, + proof, + } + } + + #[test] + fun success_withdraw() { + let params = withdraw(); + + confidential_proof::verify_withdrawal_proof( + ¶ms.ek, + params.amount, + ¶ms.current_balance, + ¶ms.new_balance, + ¶ms.proof); + } + + #[test] + #[expected_failure(abort_code = 0x010001, location = confidential_proof)] + fun fail_withdraw_if_wrong_amount() { + let params = withdraw(); + + confidential_proof::verify_withdrawal_proof( + ¶ms.ek, + 1000, + ¶ms.current_balance, + ¶ms.new_balance, + ¶ms.proof); + } + + #[test] + #[expected_failure(abort_code = 0x010001, location = confidential_proof)] + fun fail_withdraw_if_wrong_current_balance() { + let params = withdraw(); + + confidential_proof::verify_withdrawal_proof( + ¶ms.ek, + params.amount, + &confidential_balance::new_actual_balance_from_u128( + 1000, + &confidential_balance::generate_balance_randomness(), + ¶ms.ek + ), + ¶ms.new_balance, + ¶ms.proof); + } + + #[test] + #[expected_failure(abort_code = 0x010001, location = confidential_proof)] + fun fail_withdraw_if_wrong_new_balance() { + let params = withdraw(); + + confidential_proof::verify_withdrawal_proof( + ¶ms.ek, + params.amount, + ¶ms.current_balance, + &confidential_balance::new_actual_balance_from_u128( + 1000, + &confidential_balance::generate_balance_randomness(), + ¶ms.ek + ), + ¶ms.proof); + } + + #[test] + #[expected_failure(abort_code = 0x010001, location = confidential_proof)] + fun fail_withdraw_if_negative_new_balance() { + // 0 - 1 = max_uint128 + let max_uint128 = 340282366920938463463374607431768211455; + let params = withdraw_with_params(0, max_uint128 - 1, 1); + + confidential_proof::verify_withdrawal_proof( + ¶ms.ek, + params.amount, + ¶ms.current_balance, + 
¶ms.new_balance, + ¶ms.proof); + } + + #[test] + fun success_transfer() { + let params = transfer(); + + confidential_proof::verify_transfer_proof( + ¶ms.sender_ek, + ¶ms.recipient_ek, + ¶ms.current_balance, + ¶ms.new_balance, + ¶ms.sender_amount, + ¶ms.recipient_amount, + ¶ms.auditor_eks, + ¶ms.auditor_amounts, + ¶ms.proof); + } + + #[test] + #[expected_failure(abort_code = 0x010001, location = confidential_proof)] + fun fail_transfer_if_wrong_sender_ek() { + let params = transfer(); + + confidential_proof::verify_transfer_proof( + ¶ms.recipient_ek, + ¶ms.recipient_ek, + ¶ms.current_balance, + ¶ms.new_balance, + ¶ms.sender_amount, + ¶ms.recipient_amount, + ¶ms.auditor_eks, + ¶ms.auditor_amounts, + ¶ms.proof); + } + + #[test] + #[expected_failure(abort_code = 0x010001, location = confidential_proof)] + fun fail_transfer_if_wrong_recipient_ek() { + let params = transfer(); + + confidential_proof::verify_transfer_proof( + ¶ms.sender_ek, + ¶ms.sender_ek, + ¶ms.current_balance, + ¶ms.new_balance, + ¶ms.sender_amount, + ¶ms.recipient_amount, + ¶ms.auditor_eks, + ¶ms.auditor_amounts, + ¶ms.proof); + } + + #[test] + #[expected_failure(abort_code = 0x010001, location = confidential_proof)] + fun fail_transfer_if_wrong_current_balance() { + let params = transfer(); + + confidential_proof::verify_transfer_proof( + ¶ms.sender_ek, + ¶ms.recipient_ek, + &confidential_balance::new_actual_balance_from_u128( + 1000, + &confidential_balance::generate_balance_randomness(), + ¶ms.sender_ek + ), + ¶ms.new_balance, + ¶ms.sender_amount, + ¶ms.recipient_amount, + ¶ms.auditor_eks, + ¶ms.auditor_amounts, + ¶ms.proof); + } + + #[test] + #[expected_failure(abort_code = 0x010001, location = confidential_proof)] + fun fail_transfer_if_negative_new_balance() { + // 0 - 1 = max_uint128 + let max_uint128 = 340282366920938463463374607431768211455; + let params = transfer_with_parameters(0, max_uint128 - 1, 1); + + confidential_proof::verify_transfer_proof( + ¶ms.sender_ek, + ¶ms.recipient_ek, + ¶ms.current_balance, + ¶ms.new_balance, + ¶ms.sender_amount, + ¶ms.recipient_amount, + ¶ms.auditor_eks, + ¶ms.auditor_amounts, + ¶ms.proof); + } + + #[test] + #[expected_failure(abort_code = 0x010001, location = confidential_proof)] + fun fail_transfer_if_wrong_sender_amount() { + let params = transfer(); + + confidential_proof::verify_transfer_proof( + ¶ms.sender_ek, + ¶ms.recipient_ek, + ¶ms.current_balance, + ¶ms.new_balance, + &confidential_balance::new_pending_balance_from_u64( + 1000, &confidential_balance::generate_balance_randomness(), ¶ms.recipient_ek), + ¶ms.recipient_amount, + ¶ms.auditor_eks, + ¶ms.auditor_amounts, + ¶ms.proof); + } + + #[test] + #[expected_failure(abort_code = 0x010001, location = confidential_proof)] + fun fail_transfer_if_wrong_recipient_amount() { + let params = transfer(); + + confidential_proof::verify_transfer_proof( + ¶ms.sender_ek, + ¶ms.recipient_ek, + ¶ms.current_balance, + ¶ms.new_balance, + ¶ms.sender_amount, + &confidential_balance::new_pending_balance_from_u64( + 1000, &confidential_balance::generate_balance_randomness(), ¶ms.recipient_ek), + ¶ms.auditor_eks, + ¶ms.auditor_amounts, + ¶ms.proof); + } + + #[test] + #[expected_failure(abort_code = 0x010001, location = confidential_proof)] + fun fail_transfer_if_wrong_auditor_eks() { + let params = transfer(); + + let (_, auditor_ek) = generate_twisted_elgamal_keypair(); + let auditor_eks = vector[auditor_ek]; + + confidential_proof::verify_transfer_proof( + ¶ms.sender_ek, + ¶ms.recipient_ek, + ¶ms.current_balance, + ¶ms.new_balance, + 
¶ms.sender_amount, + ¶ms.recipient_amount, + &auditor_eks, + ¶ms.auditor_amounts, + ¶ms.proof); + } + + #[test] + #[expected_failure(abort_code = 0x010001, location = confidential_proof)] + fun fail_transfer_if_wrong_auditor_amounts() { + let params = transfer(); + + let (_, auditor_ek) = generate_twisted_elgamal_keypair(); + let auditor_amount = confidential_balance::new_pending_balance_from_u64( + 1000, + &confidential_balance::generate_balance_randomness(), + &auditor_ek + ); + let auditor_amounts = vector[auditor_amount]; + + confidential_proof::verify_transfer_proof( + ¶ms.sender_ek, + ¶ms.recipient_ek, + ¶ms.current_balance, + ¶ms.new_balance, + ¶ms.sender_amount, + ¶ms.recipient_amount, + ¶ms.auditor_eks, + &auditor_amounts, + ¶ms.proof); + } + + #[test] + fun success_rotate() { + let params = rotate(); + + confidential_proof::verify_rotation_proof( + ¶ms.current_ek, + ¶ms.new_ek, + ¶ms.current_balance, + ¶ms.new_balance, + ¶ms.proof); + } + + #[test] + #[expected_failure(abort_code = 0x010001, location = confidential_proof)] + fun fail_rotate_if_wrong_current_ek() { + let params = rotate(); + + confidential_proof::verify_rotation_proof( + ¶ms.new_ek, + ¶ms.new_ek, + ¶ms.current_balance, + ¶ms.new_balance, + ¶ms.proof); + } + + #[test] + #[expected_failure(abort_code = 0x010001, location = confidential_proof)] + fun fail_rotate_if_wrong_new_ek() { + let params = rotate(); + + confidential_proof::verify_rotation_proof( + ¶ms.current_ek, + ¶ms.current_ek, + ¶ms.current_balance, + ¶ms.new_balance, + ¶ms.proof); + } + + #[test] + #[expected_failure(abort_code = 0x010001, location = confidential_proof)] + fun fail_rotate_if_wrong_current_balance() { + let params = rotate(); + + confidential_proof::verify_rotation_proof( + ¶ms.current_ek, + ¶ms.new_ek, + &confidential_balance::new_actual_balance_from_u128( + 1000, + &confidential_balance::generate_balance_randomness(), + ¶ms.current_ek + ), + ¶ms.new_balance, + ¶ms.proof); + } + + #[test] + #[expected_failure(abort_code = 0x010001, location = confidential_proof)] + fun fail_rotate_if_wrong_new_balance() { + let params = rotate(); + + confidential_proof::verify_rotation_proof( + ¶ms.current_ek, + ¶ms.new_ek, + ¶ms.current_balance, + &confidential_balance::new_actual_balance_from_u128( + 1000, + &confidential_balance::generate_balance_randomness(), + ¶ms.new_ek + ), + ¶ms.proof); + } + + #[test] + fun success_normalize() { + let params = normalize(); + + confidential_proof::verify_normalization_proof( + ¶ms.ek, + ¶ms.current_balance, + ¶ms.new_balance, + ¶ms.proof); + } + + #[test] + #[expected_failure(abort_code = 0x010001, location = confidential_proof)] + fun fail_normalize_if_wrong_ek() { + let params = normalize(); + + let (_, ek) = generate_twisted_elgamal_keypair(); + + confidential_proof::verify_normalization_proof( + &ek, + ¶ms.current_balance, + ¶ms.new_balance, + ¶ms.proof); + } + + #[test] + #[expected_failure(abort_code = 0x010001, location = confidential_proof)] + fun fail_normalize_if_wrong_current_balance() { + let params = normalize(); + + confidential_proof::verify_normalization_proof( + ¶ms.ek, + &confidential_balance::new_actual_balance_from_u128( + 1000, + &confidential_balance::generate_balance_randomness(), + ¶ms.ek + ), + ¶ms.new_balance, + ¶ms.proof); + } + + #[test] + #[expected_failure(abort_code = 0x010001, location = confidential_proof)] + fun fail_normalize_if_wrong_new_balance() { + let params = normalize(); + + confidential_proof::verify_normalization_proof( + ¶ms.ek, + ¶ms.current_balance, + 
&confidential_balance::new_actual_balance_from_u128( + 1000, + &confidential_balance::generate_balance_randomness(), + ¶ms.ek + ), + ¶ms.proof); + } +} diff --git a/aptos-move/framework/aptos-experimental/tests/veiled_coin/veiled_coin_tests.move b/aptos-move/framework/aptos-experimental/tests/veiled_coin/veiled_coin_tests.move new file mode 100644 index 0000000000000..b2d634ea85bca --- /dev/null +++ b/aptos-move/framework/aptos-experimental/tests/veiled_coin/veiled_coin_tests.move @@ -0,0 +1,415 @@ +/// Tests for veiled coins. +/// +/// TODO: improve testing framework; currently very cumbersome to set up a veiled payment test +/// TODO: test that payments to self return successfully (ideally, they should do nothing) +module aptos_experimental::veiled_coin_tests { + #[test_only] + use std::features; + #[test_only] + use std::signer; + #[test_only] + use std::string::utf8; + + #[test_only] + use aptos_std::ristretto255_bulletproofs as bulletproofs; + #[test_only] + use aptos_std::debug::print; + #[test_only] + use aptos_std::ristretto255_elgamal as elgamal; + #[test_only] + use aptos_std::ristretto255; + #[test_only] + use aptos_std::ristretto255_pedersen as pedersen; + + #[test_only] + use aptos_framework::account; + #[test_only] + use aptos_framework::coin; + + #[test_only] + use aptos_experimental::veiled_coin; + #[test_only] + use aptos_experimental::helpers::generate_elgamal_keypair; + #[test_only] + use aptos_experimental::sigma_protos::{serialize_withdrawal_subproof, prove_withdrawal}; + #[test_only] + use aptos_experimental::sigma_protos; + + // + // Test-only functions + // + + #[test_only] + /// Initializes the `veiled_coin` module and sets up a `sender` account with `sender_amount` + `recipient_amount` + /// of `FakeCoin`'s. Then, sends `recipient_amount` of coins from `sender` to `recipient`. + /// + /// Can be called with `sender` set to be equal to `recipient`. + fun set_up_for_veiled_coin_test( + veiled_coin: &signer, + aptos_fx: signer, + sender: &signer, + recipient: &signer, + sender_amount: u32, + recipient_amount: u32, + ) { + // Assumption is that framework address is different than recipient and sender addresses + assert!(signer::address_of(&aptos_fx) != signer::address_of(sender), 1); + assert!(signer::address_of(&aptos_fx) != signer::address_of(recipient), 2); + + // Initialize the `veiled_coin` module & enable the feature + veiled_coin::init_module_for_testing(veiled_coin); + println(b"Initialized module."); + features::change_feature_flags_for_testing(&aptos_fx, vector[features::get_bulletproofs_feature()], vector[]); + println(b"Enabled feature flags."); + + // Set up an account for the framework address + account::create_account_for_test(signer::address_of(&aptos_fx)); // needed in `coin::create_fake_money` + account::create_account_for_test(signer::address_of(sender)); // needed in `coin::transfer` + if (signer::address_of(recipient) != signer::address_of(sender)) { + account::create_account_for_test(signer::address_of(recipient)); // needed in `coin::transfer` + }; + println(b"Created accounts for test."); + + // Create `amount` of `FakeCoin` coins at the Aptos 0x1 address (must do) and register a `FakeCoin` coin + // store for the `sender`. 
+ coin::create_fake_money( + &aptos_fx, + sender, + veiled_coin::cast_u32_to_u64_amount(sender_amount + recipient_amount)); + println(b"Created fake money inside @aptos_framework"); + + // Transfer some coins from the framework to the sender + coin::transfer( + &aptos_fx, + signer::address_of(sender), + veiled_coin::cast_u32_to_u64_amount(sender_amount)); + println(b"Transferred some fake money to the sender."); + + // Transfer some coins from the sender to the recipient + coin::register(recipient); + coin::transfer( + &aptos_fx, + signer::address_of(recipient), + veiled_coin::cast_u32_to_u64_amount(recipient_amount)); + println(b"Transferred some fake money to the recipient."); + + println(b"Sender balance (as u64):"); + print(&coin::balance(signer::address_of(sender))); + println(b"Sender balance (as u32):"); + print(&veiled_coin::clamp_u64_to_u32_amount(coin::balance(signer::address_of(sender)))); + if (signer::address_of(recipient) != signer::address_of(sender)) { + println(b"Recipient balance (as u64):"); + print(&coin::balance(signer::address_of(recipient))); + println(b"Sender balance (as u32):"); + print(&veiled_coin::clamp_u64_to_u32_amount(coin::balance(signer::address_of(recipient)))); + } else { + println(b"(Recipient equals sender)"); + }; + } + + #[test_only] + /// Prints a string on its own line. + public fun println(str: vector) { + print(&utf8(str)); + } + + // + // Tests + // + + #[test(veiled_coin = @aptos_experimental, aptos_fx = @aptos_framework, sender = @0xc0ffee, recipient = @0x1337)] + fun veil_test( + veiled_coin: signer, + aptos_fx: signer, + sender: signer, + recipient: signer + ) { + println(b"Starting veil_test()..."); + println(b"@veiled_coin:"); + print(&@aptos_experimental); + println(b"@aptos_framework:"); + print(&@aptos_framework); + + // Split 500 and 500 between `sender` and `recipient` + set_up_for_veiled_coin_test( + &veiled_coin, aptos_fx, &sender, &recipient, 500u32, 500u32); + + // Register a veiled balance at the `recipient`'s account + let (recipient_sk, recipient_pk) = generate_elgamal_keypair(); + veiled_coin::register(&recipient, elgamal::pubkey_to_bytes(&recipient_pk)); + println(b"Registered recipient's veiled coin balance"); + + // Veil 150 normal coins from the `sender`'s normal coin account to the `recipient`'s veiled coin account + veiled_coin::veil_to(&sender, signer::address_of(&recipient), 150u32); + println(b"Sender veiled some coins over to the recipient"); + + // Check the transfer occurred correctly: sender has 350 public coins, recipient has 150 (not-yet-)veiled coins + assert!(coin::balance(signer::address_of(&sender)) == veiled_coin::cast_u32_to_u64_amount(350u32), 1); + assert!(veiled_coin::verify_opened_balance( + signer::address_of(&recipient), 150u32, &ristretto255::scalar_zero(), &recipient_pk), 1); + + // Register a veiled balance at the `sender`'s account + let (_, sender_pk) = generate_elgamal_keypair(); + veiled_coin::register(&sender, elgamal::pubkey_to_bytes(&sender_pk)); + + // The `recipient` wants to unveil 50 coins (to the `sender`), so build a range proof for that. + // (Note: Technically, because the balance is not yet actually-veiled, the range proof could be avoided here in + // a smarter design.) 
+ let recipient_new_balance = ristretto255::new_scalar_from_u32(100u32); + let recipient_curr_balance = ristretto255::new_scalar_from_u32(150u32); + let recipient_amount_unveiled = ristretto255::new_scalar_from_u32(50u32); + let (new_balance_range_proof, _) = bulletproofs::prove_range_pedersen( + &recipient_new_balance, &ristretto255::scalar_zero(), + veiled_coin::get_max_bits_in_veiled_coin_value(), veiled_coin::get_veiled_coin_bulletproofs_dst()); + let new_balance_range_proof_bytes = bulletproofs::range_proof_to_bytes(&new_balance_range_proof); + + let curr_balance_ct = elgamal::new_ciphertext_with_basepoint(&recipient_curr_balance, &ristretto255::scalar_zero(), &recipient_pk); + let new_balance_comm = pedersen::new_commitment_for_bulletproof(&recipient_new_balance, &ristretto255::scalar_zero()); + let new_balance_comm_bytes = pedersen::commitment_to_bytes(&new_balance_comm); + + // Compute a sigma proof which shows that the recipient's new balance ciphertext and commitment both encode + // the same value. The commitment is necessary to ensure the value is binding + let sigma_proof = prove_withdrawal( + &recipient_sk, + &recipient_pk, + &curr_balance_ct, + &new_balance_comm, + &recipient_new_balance, + &recipient_amount_unveiled, + &ristretto255::scalar_zero()); + let sigma_proof_bytes = serialize_withdrawal_subproof(&sigma_proof); + + // Transfer `50` veiled coins from the `recipient` to the `sender`'s public balance + veiled_coin::unveil_to( + &recipient, signer::address_of(&sender), 50u32, new_balance_comm_bytes, new_balance_range_proof_bytes, sigma_proof_bytes); + + // Check that the sender now has 350 + 50 = 400 public coins + let sender_public_balance = coin::balance(signer::address_of(&sender)); + assert!(sender_public_balance == veiled_coin::cast_u32_to_u64_amount(400u32), 1); + // Check that the recipient now has 100 veiled coins + assert!(veiled_coin::verify_opened_balance( + signer::address_of(&recipient), 100u32, &ristretto255::scalar_zero(), &recipient_pk), 1); + } + + #[test(veiled_coin = @aptos_experimental, aptos_fx = @aptos_framework, sender = @0x1337)] + fun unveil_test( + veiled_coin: signer, + aptos_fx: signer, + sender: signer, + ) { + println(b"Starting unveil_test()..."); + println(b"@veiled_coin:"); + print(&@aptos_experimental); + println(b"@aptos_framework:"); + print(&@aptos_framework); + + // Create a `sender` account with 500 `FakeCoin`'s + set_up_for_veiled_coin_test( + &veiled_coin, aptos_fx, &sender, &sender, 500, 0); + + // Register a veiled balance for the `sender` + let (sender_sk, sender_pk) = generate_elgamal_keypair(); + veiled_coin::register(&sender, elgamal::pubkey_to_bytes(&sender_pk)); + println(b"Registered the sender's veiled balance"); + + // Veil 150 out of the `sender`'s 500 coins. + // + // Note: Sender initializes his veiled balance to 150 veiled coins, which is why we don't need its SK to decrypt + // it in order to transact. 
+ veiled_coin::veil(&sender, 150); + println(b"Veiled 150 coins to the `sender`"); + + println(b"Total veiled coins:"); + print(&veiled_coin::total_veiled_coins()); + + assert!(veiled_coin::total_veiled_coins() == veiled_coin::cast_u32_to_u64_amount(150), 1); + + // The `unveil` function uses randomness zero for the ElGamal encryption of the amount + let sender_new_balance = ristretto255::new_scalar_from_u32(100); + let sender_curr_balance = ristretto255::new_scalar_from_u32(150); + let sender_amount_unveiled = ristretto255::new_scalar_from_u32(50); + let zero_randomness = ristretto255::scalar_zero(); + + let (new_balance_range_proof, _) = bulletproofs::prove_range_pedersen( + &sender_new_balance, + &zero_randomness, + veiled_coin::get_max_bits_in_veiled_coin_value(), veiled_coin::get_veiled_coin_bulletproofs_dst()); + + let curr_balance_ct = elgamal::new_ciphertext_with_basepoint(&sender_curr_balance, &zero_randomness, &sender_pk); + let new_balance_comm = pedersen::new_commitment_for_bulletproof(&sender_new_balance, &zero_randomness); + let new_balance_comm_bytes = pedersen::commitment_to_bytes(&new_balance_comm); + + let sigma_proof = sigma_protos::prove_withdrawal( + &sender_sk, + &sender_pk, + &curr_balance_ct, + &new_balance_comm, + &sender_new_balance, + &sender_amount_unveiled, + &zero_randomness); + let sigma_proof_bytes = serialize_withdrawal_subproof(&sigma_proof); + + println(b"about to unveil"); + // Move 50 veiled coins into the public balance of the sender + veiled_coin::unveil( + &sender, + 50, + new_balance_comm_bytes, + bulletproofs::range_proof_to_bytes(&new_balance_range_proof), + sigma_proof_bytes); + + println(b"Remaining veiled coins, after `unveil` call:"); + print(&veiled_coin::total_veiled_coins()); + + assert!(veiled_coin::total_veiled_coins() == veiled_coin::cast_u32_to_u64_amount(100), 1); + + assert!(veiled_coin::verify_opened_balance( + signer::address_of(&sender), 100, &zero_randomness, &sender_pk), 2); + + let remaining_public_balance = coin::balance(signer::address_of(&sender)); + assert!(remaining_public_balance == veiled_coin::cast_u32_to_u64_amount(400), 3); + } + + #[test(veiled_coin = @aptos_experimental, aptos_fx = @aptos_framework, sender = @0xc0ffee, recipient = @0x1337)] + fun basic_viability_test( + veiled_coin: signer, + aptos_fx: signer, + sender: signer, + recipient: signer + ) { + set_up_for_veiled_coin_test(&veiled_coin, aptos_fx, &sender, &recipient, 500, 500); + + // Creates a balance of `b = 150` veiled coins at sender (requires registering a veiled coin store at 'sender') + let (sender_sk, sender_pk) = generate_elgamal_keypair(); + veiled_coin::register(&sender, elgamal::pubkey_to_bytes(&sender_pk)); + veiled_coin::veil(&sender, 150); + println(b"Veiled 150 coins to the `sender`"); + // TODO: This throws an invariant violation (INTERNAL_TYPE_ERROR (code 2009)) + //print(&sender); + + // Make sure we are correctly keeping track of the normal coins veiled in this module + let total_veiled_coins = veiled_coin::cast_u32_to_u64_amount(150); + assert!(veiled_coin::total_veiled_coins() == total_veiled_coins, 1); + + // Transfer `v = 50` of these veiled coins to the recipient + let amount_val = ristretto255::new_scalar_from_u32(50); + let amount_rand = ristretto255::random_scalar(); + + // The commitment to the sender's new balance can use fresh randomness as we don't use the + // new balance amount in a ciphertext + let new_balance_rand = ristretto255::random_scalar(); + let curr_balance_val = ristretto255::new_scalar_from_u32(150); + + // The 
sender's new balance is 150 - 50 = 100 + let new_balance_val = ristretto255::new_scalar_from_u32(100); + + // No veiled transfers have been done yet so the sender's balance randomness is zero + let curr_balance_ct = elgamal::new_ciphertext_with_basepoint(&curr_balance_val, &ristretto255::scalar_zero(), &sender_pk); + let (new_balance_range_proof, _) = bulletproofs::prove_range_pedersen( + &new_balance_val, &new_balance_rand, + veiled_coin::get_max_bits_in_veiled_coin_value(), veiled_coin::get_veiled_coin_bulletproofs_dst()); + println(b"Computed range proof over the `sender`'s new balance"); + + // Compute a range proof over the commitment to `v` and encrypt it under the `sender`'s PK + let withdraw_ct = elgamal::new_ciphertext_with_basepoint(&amount_val, &amount_rand, &sender_pk); + let (amount_val_range_proof, _) = bulletproofs::prove_range_pedersen( + &amount_val, &amount_rand, + veiled_coin::get_max_bits_in_veiled_coin_value(), veiled_coin::get_veiled_coin_bulletproofs_dst()); + println(b"Computed range proof over the transferred amount"); + + // Register the `recipient` for receiving veiled coins + let (_, recipient_pk) = generate_elgamal_keypair(); + veiled_coin::register(&recipient, elgamal::pubkey_to_bytes(&recipient_pk)); + println(b"Registered the `recipient` to receive veiled coins"); + // TODO: This throws an invariant violation (INTERNAL_TYPE_ERROR (code 2009)) + //print(&recipient); + + // Encrypt the transfered amount `v` under the `recipient`'s PK + let deposit_ct = elgamal::new_ciphertext_with_basepoint( + &amount_val, &amount_rand, &recipient_pk); + + let amount_comm = pedersen::new_commitment_for_bulletproof(&amount_val, &amount_rand); + let new_balance_comm = pedersen::new_commitment_for_bulletproof(&new_balance_val, &new_balance_rand); + println(b"Computed commitments to the amount to transfer and the sender's updated balance"); + + // Prove that the two encryptions of `v` are to the same value + let sigma_proof = sigma_protos::prove_transfer( + &sender_pk, + &sender_sk, + &recipient_pk, + &withdraw_ct, + &deposit_ct, + &amount_comm, + &curr_balance_ct, + &new_balance_comm, + &amount_rand, + &amount_val, + &new_balance_rand, + &new_balance_val); + let sigma_proof_bytes = sigma_protos::serialize_transfer_subproof(&sigma_proof); + println(b"Created sigma protocol proof"); + + // Sanity check veiled balances + assert!(veiled_coin::verify_opened_balance(signer::address_of(&sender), 150, &ristretto255::scalar_zero(), &sender_pk), 1); + assert!(veiled_coin::verify_opened_balance(signer::address_of(&recipient), 0, &ristretto255::scalar_zero(), &recipient_pk), 1); + + // Execute the veiled transaction: no one will be able to tell 50 coins are being transferred. 
+ veiled_coin::fully_veiled_transfer( + &sender, + signer::address_of(&recipient), + elgamal::ciphertext_to_bytes(&withdraw_ct), + elgamal::ciphertext_to_bytes(&deposit_ct), + pedersen::commitment_to_bytes(&new_balance_comm), + pedersen::commitment_to_bytes(&amount_comm), + bulletproofs::range_proof_to_bytes(&new_balance_range_proof), + bulletproofs::range_proof_to_bytes(&amount_val_range_proof), + sigma_proof_bytes); + println(b"Transferred veiled coins"); + + // Compute the randomness of the sender's current balance + let balance_rand = ristretto255::scalar_neg(&amount_rand); + // Sanity check veiled balances + assert!(veiled_coin::verify_opened_balance(signer::address_of(&sender), 100, &balance_rand, &sender_pk), 1); + assert!(veiled_coin::verify_opened_balance(signer::address_of(&recipient), 50, &amount_rand, &recipient_pk), 1); + + assert!(veiled_coin::total_veiled_coins() == total_veiled_coins, 1); + + // Drain the whole remaining balance of the sender + let new_curr_balance_val = ristretto255::new_scalar_from_u32(100); + let new_amount_val = ristretto255::new_scalar_from_u32(100); + let new_new_balance_val = ristretto255::new_scalar_from_u32(0); + let fresh_new_balance_rand = ristretto255::random_scalar(); + + // `unveil` doesn't change the randomness, so we reuse the `new_balance_rand` randomness from before + let (new_new_balance_range_proof, _) = bulletproofs::prove_range_pedersen( + &new_new_balance_val, &fresh_new_balance_rand, + veiled_coin::get_max_bits_in_veiled_coin_value(), veiled_coin::get_veiled_coin_bulletproofs_dst()); + + // Compute a pedersen commitment over the same values the range proof is done over to gurantee a binding commitment + // to the sender's new balance. A sigma proof demonstrates the commitment and ciphertexts contain the same value and randomness + let new_curr_balance_ct = elgamal::new_ciphertext_with_basepoint(&new_curr_balance_val, &balance_rand, &sender_pk); + let new_new_balance_comm = pedersen::new_commitment_for_bulletproof(&new_new_balance_val, &fresh_new_balance_rand); + let new_new_balance_comm_bytes = pedersen::commitment_to_bytes(&new_new_balance_comm); + let sigma_proof = sigma_protos::prove_withdrawal( + &sender_sk, + &sender_pk, + &new_curr_balance_ct, + &new_new_balance_comm, + &new_new_balance_val, + &new_amount_val, + &fresh_new_balance_rand); + let sigma_proof_bytes = serialize_withdrawal_subproof(&sigma_proof); + + // Unveil all coins of the `sender` + veiled_coin::unveil( + &sender, 100, new_new_balance_comm_bytes, bulletproofs::range_proof_to_bytes(&new_new_balance_range_proof), sigma_proof_bytes); + println(b"Unveiled all 100 coins from the `sender`'s veiled balance"); + + let total_veiled_coins = veiled_coin::cast_u32_to_u64_amount(50); + assert!(veiled_coin::total_veiled_coins() == total_veiled_coins, 1); + + // Sanity check veiled balances + assert!(veiled_coin::verify_opened_balance(signer::address_of(&sender), 0, &balance_rand, &sender_pk), 1); + assert!(veiled_coin::verify_opened_balance(signer::address_of(&recipient), 50, &amount_rand, &recipient_pk), 1); + } +} diff --git a/aptos-move/framework/aptos-framework/Prover.toml b/aptos-move/framework/aptos-framework/Prover.toml new file mode 100644 index 0000000000000..2cda331ffcc21 --- /dev/null +++ b/aptos-move/framework/aptos-framework/Prover.toml @@ -0,0 +1,2 @@ +[backend] +shards = 5 diff --git a/aptos-move/framework/aptos-framework/doc/account.md b/aptos-move/framework/aptos-framework/doc/account.md index 81ab4f7c7dd3b..e3bb969dae14f 100644 --- 
a/aptos-move/framework/aptos-framework/doc/account.md +++ b/aptos-move/framework/aptos-framework/doc/account.md @@ -6,9 +6,11 @@ - [Struct `KeyRotation`](#0x1_account_KeyRotation) +- [Struct `KeyRotationToPublicKey`](#0x1_account_KeyRotationToPublicKey) - [Resource `Account`](#0x1_account_Account) - [Struct `KeyRotationEvent`](#0x1_account_KeyRotationEvent) - [Struct `CoinRegisterEvent`](#0x1_account_CoinRegisterEvent) +- [Struct `CoinRegister`](#0x1_account_CoinRegister) - [Struct `CapabilityOffer`](#0x1_account_CapabilityOffer) - [Struct `RotationCapability`](#0x1_account_RotationCapability) - [Struct `SignerCapability`](#0x1_account_SignerCapability) @@ -18,21 +20,35 @@ - [Struct `SignerCapabilityOfferProofChallenge`](#0x1_account_SignerCapabilityOfferProofChallenge) - [Struct `RotationCapabilityOfferProofChallengeV2`](#0x1_account_RotationCapabilityOfferProofChallengeV2) - [Struct `SignerCapabilityOfferProofChallengeV2`](#0x1_account_SignerCapabilityOfferProofChallengeV2) +- [Enum `AccountPermission`](#0x1_account_AccountPermission) - [Constants](#@Constants_0) +- [Function `check_rotation_permission`](#0x1_account_check_rotation_permission) +- [Function `check_offering_permission`](#0x1_account_check_offering_permission) +- [Function `grant_key_rotation_permission`](#0x1_account_grant_key_rotation_permission) +- [Function `grant_key_offering_permission`](#0x1_account_grant_key_offering_permission) - [Function `initialize`](#0x1_account_initialize) - [Function `create_account_if_does_not_exist`](#0x1_account_create_account_if_does_not_exist) - [Function `create_account`](#0x1_account_create_account) - [Function `create_account_unchecked`](#0x1_account_create_account_unchecked) - [Function `exists_at`](#0x1_account_exists_at) +- [Function `resource_exists_at`](#0x1_account_resource_exists_at) - [Function `get_guid_next_creation_num`](#0x1_account_get_guid_next_creation_num) - [Function `get_sequence_number`](#0x1_account_get_sequence_number) +- [Function `originating_address`](#0x1_account_originating_address) +- [Function `ensure_resource_exists`](#0x1_account_ensure_resource_exists) - [Function `increment_sequence_number`](#0x1_account_increment_sequence_number) - [Function `get_authentication_key`](#0x1_account_get_authentication_key) - [Function `rotate_authentication_key_internal`](#0x1_account_rotate_authentication_key_internal) - [Function `rotate_authentication_key_call`](#0x1_account_rotate_authentication_key_call) +- [Function `rotate_authentication_key_from_public_key`](#0x1_account_rotate_authentication_key_from_public_key) +- [Function `upsert_ed25519_backup_key_on_keyless_account`](#0x1_account_upsert_ed25519_backup_key_on_keyless_account) + - [Arguments](#@Arguments_1) + - [Aborts](#@Aborts_2) + - [Events](#@Events_3) - [Function `rotate_authentication_key`](#0x1_account_rotate_authentication_key) - [Function `rotate_authentication_key_with_rotation_capability`](#0x1_account_rotate_authentication_key_with_rotation_capability) - [Function `offer_rotation_capability`](#0x1_account_offer_rotation_capability) +- [Function `set_originating_address`](#0x1_account_set_originating_address) - [Function `is_rotation_capability_offered`](#0x1_account_is_rotation_capability_offered) - [Function `get_rotation_capability_offer_for`](#0x1_account_get_rotation_capability_offer_for) - [Function `revoke_rotation_capability`](#0x1_account_revoke_rotation_capability) @@ -43,6 +59,7 @@ - [Function `revoke_signer_capability`](#0x1_account_revoke_signer_capability) - [Function 
`revoke_any_signer_capability`](#0x1_account_revoke_any_signer_capability) - [Function `create_authorized_signer`](#0x1_account_create_authorized_signer) +- [Function `assert_account_resource_with_error`](#0x1_account_assert_account_resource_with_error) - [Function `assert_valid_rotation_proof_signature_and_get_auth_key`](#0x1_account_assert_valid_rotation_proof_signature_and_get_auth_key) - [Function `update_auth_key_and_originating_address_table`](#0x1_account_update_auth_key_and_originating_address_table) - [Function `create_resource_address`](#0x1_account_create_resource_address) @@ -54,43 +71,46 @@ - [Function `create_signer_with_capability`](#0x1_account_create_signer_with_capability) - [Function `get_signer_capability_address`](#0x1_account_get_signer_capability_address) - [Function `verify_signed_message`](#0x1_account_verify_signed_message) -- [Specification](#@Specification_1) +- [Specification](#@Specification_4) - [High-level Requirements](#high-level-req) - [Module-level Specification](#module-level-spec) - - [Function `initialize`](#@Specification_1_initialize) - - [Function `create_account_if_does_not_exist`](#@Specification_1_create_account_if_does_not_exist) - - [Function `create_account`](#@Specification_1_create_account) - - [Function `create_account_unchecked`](#@Specification_1_create_account_unchecked) - - [Function `exists_at`](#@Specification_1_exists_at) - - [Function `get_guid_next_creation_num`](#@Specification_1_get_guid_next_creation_num) - - [Function `get_sequence_number`](#@Specification_1_get_sequence_number) - - [Function `increment_sequence_number`](#@Specification_1_increment_sequence_number) - - [Function `get_authentication_key`](#@Specification_1_get_authentication_key) - - [Function `rotate_authentication_key_internal`](#@Specification_1_rotate_authentication_key_internal) - - [Function `rotate_authentication_key_call`](#@Specification_1_rotate_authentication_key_call) - - [Function `rotate_authentication_key`](#@Specification_1_rotate_authentication_key) - - [Function `rotate_authentication_key_with_rotation_capability`](#@Specification_1_rotate_authentication_key_with_rotation_capability) - - [Function `offer_rotation_capability`](#@Specification_1_offer_rotation_capability) - - [Function `is_rotation_capability_offered`](#@Specification_1_is_rotation_capability_offered) - - [Function `get_rotation_capability_offer_for`](#@Specification_1_get_rotation_capability_offer_for) - - [Function `revoke_rotation_capability`](#@Specification_1_revoke_rotation_capability) - - [Function `revoke_any_rotation_capability`](#@Specification_1_revoke_any_rotation_capability) - - [Function `offer_signer_capability`](#@Specification_1_offer_signer_capability) - - [Function `is_signer_capability_offered`](#@Specification_1_is_signer_capability_offered) - - [Function `get_signer_capability_offer_for`](#@Specification_1_get_signer_capability_offer_for) - - [Function `revoke_signer_capability`](#@Specification_1_revoke_signer_capability) - - [Function `revoke_any_signer_capability`](#@Specification_1_revoke_any_signer_capability) - - [Function `create_authorized_signer`](#@Specification_1_create_authorized_signer) - - [Function `assert_valid_rotation_proof_signature_and_get_auth_key`](#@Specification_1_assert_valid_rotation_proof_signature_and_get_auth_key) - - [Function `update_auth_key_and_originating_address_table`](#@Specification_1_update_auth_key_and_originating_address_table) - - [Function `create_resource_address`](#@Specification_1_create_resource_address) - - 
[Function `create_resource_account`](#@Specification_1_create_resource_account) - - [Function `create_framework_reserved_account`](#@Specification_1_create_framework_reserved_account) - - [Function `create_guid`](#@Specification_1_create_guid) - - [Function `new_event_handle`](#@Specification_1_new_event_handle) - - [Function `register_coin`](#@Specification_1_register_coin) - - [Function `create_signer_with_capability`](#@Specification_1_create_signer_with_capability) - - [Function `verify_signed_message`](#@Specification_1_verify_signed_message) + - [Function `initialize`](#@Specification_4_initialize) + - [Function `create_account_if_does_not_exist`](#@Specification_4_create_account_if_does_not_exist) + - [Function `create_account`](#@Specification_4_create_account) + - [Function `create_account_unchecked`](#@Specification_4_create_account_unchecked) + - [Function `exists_at`](#@Specification_4_exists_at) + - [Function `get_guid_next_creation_num`](#@Specification_4_get_guid_next_creation_num) + - [Function `get_sequence_number`](#@Specification_4_get_sequence_number) + - [Function `originating_address`](#@Specification_4_originating_address) + - [Function `increment_sequence_number`](#@Specification_4_increment_sequence_number) + - [Function `get_authentication_key`](#@Specification_4_get_authentication_key) + - [Function `rotate_authentication_key_internal`](#@Specification_4_rotate_authentication_key_internal) + - [Function `rotate_authentication_key_call`](#@Specification_4_rotate_authentication_key_call) + - [Function `rotate_authentication_key_from_public_key`](#@Specification_4_rotate_authentication_key_from_public_key) + - [Function `rotate_authentication_key`](#@Specification_4_rotate_authentication_key) + - [Function `rotate_authentication_key_with_rotation_capability`](#@Specification_4_rotate_authentication_key_with_rotation_capability) + - [Function `offer_rotation_capability`](#@Specification_4_offer_rotation_capability) + - [Function `set_originating_address`](#@Specification_4_set_originating_address) + - [Function `is_rotation_capability_offered`](#@Specification_4_is_rotation_capability_offered) + - [Function `get_rotation_capability_offer_for`](#@Specification_4_get_rotation_capability_offer_for) + - [Function `revoke_rotation_capability`](#@Specification_4_revoke_rotation_capability) + - [Function `revoke_any_rotation_capability`](#@Specification_4_revoke_any_rotation_capability) + - [Function `offer_signer_capability`](#@Specification_4_offer_signer_capability) + - [Function `is_signer_capability_offered`](#@Specification_4_is_signer_capability_offered) + - [Function `get_signer_capability_offer_for`](#@Specification_4_get_signer_capability_offer_for) + - [Function `revoke_signer_capability`](#@Specification_4_revoke_signer_capability) + - [Function `revoke_any_signer_capability`](#@Specification_4_revoke_any_signer_capability) + - [Function `create_authorized_signer`](#@Specification_4_create_authorized_signer) + - [Function `assert_valid_rotation_proof_signature_and_get_auth_key`](#@Specification_4_assert_valid_rotation_proof_signature_and_get_auth_key) + - [Function `update_auth_key_and_originating_address_table`](#@Specification_4_update_auth_key_and_originating_address_table) + - [Function `create_resource_address`](#@Specification_4_create_resource_address) + - [Function `create_resource_account`](#@Specification_4_create_resource_account) + - [Function `create_framework_reserved_account`](#@Specification_4_create_framework_reserved_account) + - [Function 
`create_guid`](#@Specification_4_create_guid) + - [Function `new_event_handle`](#@Specification_4_new_event_handle) + - [Function `register_coin`](#@Specification_4_register_coin) + - [Function `create_signer_with_capability`](#@Specification_4_create_signer_with_capability) + - [Function `verify_signed_message`](#@Specification_4_verify_signed_message)
use 0x1::bcs;
@@ -104,8 +124,11 @@
 use 0x1::guid;
 use 0x1::hash;
 use 0x1::multi_ed25519;
+use 0x1::multi_key;
 use 0x1::option;
+use 0x1::permissioned_signer;
 use 0x1::signer;
+use 0x1::single_key;
 use 0x1::system_addresses;
 use 0x1::table;
 use 0x1::type_info;
@@ -152,6 +175,64 @@
 
 
 
+
+
+
+
+## Struct `KeyRotationToPublicKey`
+
+
+
+
#[event]
+struct KeyRotationToPublicKey has drop, store
+
+Fields
+
+account: address
+verified_public_key_bit_map: vector&lt;u8&gt;
+public_key_scheme: u8
+public_key: vector&lt;u8&gt;
+old_auth_key: vector&lt;u8&gt;
+new_auth_key: vector&lt;u8&gt;
+
@@ -276,6 +357,40 @@ Resource representing an account. + + + + +## Struct `CoinRegister` + + + +
#[event]
+struct CoinRegister has drop, store
+
+Fields
+
+account: address
+type_info: type_info::TypeInfo
+
@@ -600,6 +715,55 @@ This V2 struct adds the chain_id + + + + +## Enum `AccountPermission` + + + +
enum AccountPermission has copy, drop, store
+
+Variants
+
+KeyRotation (no fields)
+
+Offering (no fields)
+
@@ -689,15 +853,6 @@ Scheme identifier for Ed25519 signatures used to derive authentication keys for - - - - -
const EFLAG_NOT_ENABLED: u64 = 21;
-
- - - The caller does not have a valid rotation capability offer from the other account @@ -748,6 +903,56 @@ The provided authentication key has an invalid length + + +The new authentication key already has an entry in the OriginatingAddress table + + +
const ENEW_AUTH_KEY_ALREADY_MAPPED: u64 = 21;
+
+ + + + + +The current authentication key and the new authentication key are the same + + +
const ENEW_AUTH_KEY_SAME_AS_CURRENT: u64 = 22;
+
+ + + + + +The provided public key is not a single Keyless public key + + +
const ENOT_A_KEYLESS_PUBLIC_KEY: u64 = 25;
+
+ + + + + +The provided public key is not the original public key for the account + + +
const ENOT_THE_ORIGINAL_PUBLIC_KEY: u64 = 26;
+
+ + + + + +Current permissioned signer cannot perform the privileged operations. + + +
const ENO_ACCOUNT_PERMISSION: u64 = 23;
+
+ + + The caller does not have a digital-signature-based capability to call this function @@ -769,7 +974,7 @@ The caller does not have a digital-signature-based capability to call this funct -The specified rotation capablity offer does not exist at the specified offerer address +The specified rotation capability offer does not exist at the specified offerer address
const ENO_SUCH_ROTATION_CAPABILITY_OFFER: u64 = 18;
@@ -837,6 +1042,16 @@ Sequence number exceeds the maximum value for a u64
 
 
 
+
+
+Specified scheme is not recognized. Should be ED25519_SCHEME(0), MULTI_ED25519_SCHEME(1), SINGLE_KEY_SCHEME(2), or MULTI_KEY_SCHEME(3).
+
+
+
const EUNRECOGNIZED_SCHEME: u64 = 24;
+
+ + + Specified current public key is not correct @@ -867,6 +1082,26 @@ Scheme identifier for MultiEd25519 signatures used to derive authentication keys + + +Scheme identifier for multi key public keys used to derive authentication keys for multi key public keys. + + +
const MULTI_KEY_SCHEME: u8 = 3;
+
+ + + + + +Scheme identifier for single key public keys used to derive authentication keys for single key public keys. + + +
const SINGLE_KEY_SCHEME: u8 = 2;
+
+ + + @@ -876,6 +1111,115 @@ Scheme identifier for MultiEd25519 signatures used to derive authentication keys + + +## Function `check_rotation_permission` + +Permissions + + +
fun check_rotation_permission(s: &signer)
+
+ + + +
+Implementation + + +
inline fun check_rotation_permission(s: &signer) {
+    assert!(
+        permissioned_signer::check_permission_exists(s, AccountPermission::KeyRotation {}),
+        error::permission_denied(ENO_ACCOUNT_PERMISSION),
+    );
+}
+
+ + + +
+ + + +## Function `check_offering_permission` + + + +
fun check_offering_permission(s: &signer)
+
+ + + +
+Implementation + + +
inline fun check_offering_permission(s: &signer) {
+    assert!(
+        permissioned_signer::check_permission_exists(s, AccountPermission::Offering {}),
+        error::permission_denied(ENO_ACCOUNT_PERMISSION),
+    );
+}
+
+ + + +
+ + + +## Function `grant_key_rotation_permission` + +Grant permission to perform key rotations on behalf of the master signer. + +This is **extremely dangerous** and should be granted only when it's absolutely needed. + + +
public fun grant_key_rotation_permission(master: &signer, permissioned_signer: &signer)
+
+ + + +
+Implementation + + +
public fun grant_key_rotation_permission(master: &signer, permissioned_signer: &signer) {
+    permissioned_signer::authorize_unlimited(master, permissioned_signer, AccountPermission::KeyRotation {})
+}
+
+ + + +
+ + + +## Function `grant_key_offering_permission` + +Grant permission to use offered address's signer on behalf of the master signer. + +This is **extremely dangerous** and should be granted only when it's absolutely needed. + + +
public fun grant_key_offering_permission(master: &signer, permissioned_signer: &signer)
+
+ + + +
+Implementation + + +
public fun grant_key_offering_permission(master: &signer, permissioned_signer: &signer) {
+    permissioned_signer::authorize_unlimited(master, permissioned_signer, AccountPermission::Offering {})
+}
+
+ + + +
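Not part of this diff: a rough sketch of how the two grant functions above would be wired up by a module that already holds both the master signer and a permissioned signer handle. The module name and address (0xcafe) are hypothetical, and how the permissioned signer itself is obtained from 0x1::permissioned_signer is assumed rather than shown.

module 0xcafe::account_permission_example {
    use 0x1::account;

    /// Hypothetical helper: give `perm_signer` both account permissions at once.
    /// Afterwards `perm_signer` passes `check_rotation_permission` and
    /// `check_offering_permission` when acting for the master account.
    public fun grant_all_account_permissions(master: &signer, perm_signer: &signer) {
        account::grant_key_rotation_permission(master, perm_signer);
        account::grant_key_offering_permission(master, perm_signer);
    }
}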
+ ## Function `initialize` @@ -920,8 +1264,12 @@ Only called during genesis to initialize system resources for this module.
public fun create_account_if_does_not_exist(account_address: address) {
-    if (!exists<Account>(account_address)) {
-        create_account(account_address);
+    if (!resource_exists_at(account_address)) {
+        assert!(
+            account_address != @vm_reserved && account_address != @aptos_framework && account_address != @aptos_token,
+            error::invalid_argument(ECANNOT_RESERVED_ADDRESS)
+        );
+        create_account_unchecked(account_address);
     }
 }
 
@@ -951,14 +1299,16 @@ is returned. This way, the caller of this function can publish additional resour
public(friend) fun create_account(new_address: address): signer {
     // there cannot be an Account resource under new_addr already.
     assert!(!exists<Account>(new_address), error::already_exists(EACCOUNT_ALREADY_EXISTS));
-
     // NOTE: @core_resources gets created via a `create_account` call, so we do not include it below.
     assert!(
         new_address != @vm_reserved && new_address != @aptos_framework && new_address != @aptos_token,
         error::invalid_argument(ECANNOT_RESERVED_ADDRESS)
     );
-
-    create_account_unchecked(new_address)
+    if (features::is_default_account_resource_enabled()) {
+        create_signer(new_address)
+    } else {
+        create_account_unchecked(new_address)
+    }
 }
 
@@ -985,7 +1335,7 @@ is returned. This way, the caller of this function can publish additional resour let new_account = create_signer(new_address); let authentication_key = bcs::to_bytes(&new_address); assert!( - vector::length(&authentication_key) == 32, + authentication_key.length() == 32, error::invalid_argument(EMALFORMED_AUTHENTICATION_KEY) ); @@ -1018,14 +1368,120 @@ is returned. This way, the caller of this function can publish additional resour - + + +## Function `exists_at` + +Returns whether an account exists at addr. + +When the default_account_resource feature flag is enabled: +- Always returns true, indicating that any address can be treated as a valid account +- This reflects a change in the account model where accounts are now considered to exist implicitly +- The sequence number and other account properties will return default values (0) for addresses without an Account resource + +When the feature flag is disabled: +- Returns true only if an Account resource exists at addr +- This is the legacy behavior where accounts must be explicitly created + + +
#[view]
+public fun exists_at(addr: address): bool
+
+ + + +
+Implementation + + +
public fun exists_at(addr: address): bool {
+    features::is_default_account_resource_enabled() || exists<Account>(addr)
+}
+
+ + + +
+ + + +## Function `resource_exists_at` + +Returns whether an Account resource exists at addr. + +Unlike exists_at, this function strictly checks for the presence of the Account resource, +regardless of the default_account_resource feature flag. + +This is useful for operations that specifically need to know if the Account resource +has been created, rather than just whether the address can be treated as an account. + + +
fun resource_exists_at(addr: address): bool
+
+ + + +
+Implementation + + +
inline fun resource_exists_at(addr: address): bool {
+    exists<Account>(addr)
+}
+
+ + + +
+ + + +## Function `get_guid_next_creation_num` + +Returns the next GUID creation number for addr. + +When the default_account_resource feature flag is enabled: +- Returns 0 for addresses without an Account resource +- This allows GUID creation for previously non-existent accounts +- The first GUID created will start the sequence from 0 + +When the feature flag is disabled: +- Aborts if no Account resource exists at addr + + +
#[view]
+public fun get_guid_next_creation_num(addr: address): u64
+
+ + + +
+Implementation + + +
public fun get_guid_next_creation_num(addr: address): u64 acquires Account {
+    if (resource_exists_at(addr)) {
+        Account[addr].guid_creation_num
+    } else if (features::is_default_account_resource_enabled()) {
+        0
+    } else {
+        abort error::not_found(EACCOUNT_DOES_NOT_EXIST)
+    }
+}
+
+ + + +
+ + -## Function `exists_at` +## Function `get_sequence_number`
#[view]
-public fun exists_at(addr: address): bool
+public fun get_sequence_number(addr: address): u64
 
@@ -1034,8 +1490,14 @@ is returned. This way, the caller of this function can publish additional resour Implementation -
public fun exists_at(addr: address): bool {
-    exists<Account>(addr)
+
public fun get_sequence_number(addr: address): u64 acquires Account {
+    if (resource_exists_at(addr)) {
+        Account[addr].sequence_number
+    } else if (features::is_default_account_resource_enabled()) {
+        0
+    } else {
+        abort error::not_found(EACCOUNT_DOES_NOT_EXIST)
+    }
 }
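A minimal sketch (not in this diff; the 0xcafe module is hypothetical) of how the view functions above behave for an address that never published an Account resource: with default_account_resource enabled they return default values instead of aborting.

module 0xcafe::account_view_example {
    use 0x1::account;

    /// Hypothetical helper: with the feature flag on, an untouched address
    /// yields (true, 0, 0); with it off, the two getters abort with
    /// EACCOUNT_DOES_NOT_EXIST.
    public fun probe(addr: address): (bool, u64, u64) {
        (
            account::exists_at(addr),
            account::get_sequence_number(addr),
            account::get_guid_next_creation_num(addr)
        )
    }
}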
 
@@ -1043,14 +1505,14 @@ is returned. This way, the caller of this function can publish additional resour - + -## Function `get_guid_next_creation_num` +## Function `originating_address`
#[view]
-public fun get_guid_next_creation_num(addr: address): u64
+public fun originating_address(auth_key: address): option::Option<address>
 
@@ -1059,8 +1521,13 @@ is returned. This way, the caller of this function can publish additional resour Implementation -
public fun get_guid_next_creation_num(addr: address): u64 acquires Account {
-    borrow_global<Account>(addr).guid_creation_num
+
public fun originating_address(auth_key: address): Option<address> acquires OriginatingAddress {
+    let address_map_ref = &OriginatingAddress[@aptos_framework].address_map;
+    if (address_map_ref.contains(auth_key)) {
+        option::some(*address_map_ref.borrow(auth_key))
+    } else {
+        option::none()
+    }
 }
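A small usage sketch for the new view (an assumption, not part of this diff): resolve an authentication key back to the account it currently maps to, falling back to treating the key itself as the address when no OriginatingAddress entry exists.

module 0xcafe::originating_address_example {
    use 0x1::account;
    use 0x1::option;

    /// Hypothetical helper: the account an auth key currently points at.
    /// If the key was never rotated (no table entry), treat it as the address.
    public fun resolve(auth_key: address): address {
        let maybe = account::originating_address(auth_key);
        if (option::is_some(&maybe)) {
            *option::borrow(&maybe)
        } else {
            auth_key
        }
    }
}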
 
@@ -1068,14 +1535,13 @@ is returned. This way, the caller of this function can publish additional resour - + -## Function `get_sequence_number` +## Function `ensure_resource_exists` -
#[view]
-public fun get_sequence_number(addr: address): u64
+
fun ensure_resource_exists(addr: address)
 
@@ -1084,8 +1550,12 @@ is returned. This way, the caller of this function can publish additional resour Implementation -
public fun get_sequence_number(addr: address): u64 acquires Account {
-    borrow_global<Account>(addr).sequence_number
+
inline fun ensure_resource_exists(addr: address) acquires Account{
+    if (features::is_default_account_resource_enabled()) {
+        create_account_if_does_not_exist(addr);
+    } else {
+        assert!(exists_at(addr), error::not_found(EACCOUNT_DOES_NOT_EXIST));
+    }
 }
 
@@ -1109,7 +1579,8 @@ is returned. This way, the caller of this function can publish additional resour
public(friend) fun increment_sequence_number(addr: address) acquires Account {
-    let sequence_number = &mut borrow_global_mut<Account>(addr).sequence_number;
+    ensure_resource_exists(addr);
+    let sequence_number = &mut Account[addr].sequence_number;
 
     assert!(
         (*sequence_number as u128) < MAX_U64,
@@ -1141,7 +1612,13 @@ is returned. This way, the caller of this function can publish additional resour
 
 
 
public fun get_authentication_key(addr: address): vector<u8> acquires Account {
-    borrow_global<Account>(addr).authentication_key
+    if (resource_exists_at(addr)) {
+        Account[addr].authentication_key
+    } else if (features::is_default_account_resource_enabled()) {
+        bcs::to_bytes(&addr)
+    } else {
+        abort error::not_found(EACCOUNT_DOES_NOT_EXIST)
+    }
 }
 
@@ -1171,12 +1648,13 @@ many contexts:
public(friend) fun rotate_authentication_key_internal(account: &signer, new_auth_key: vector<u8>) acquires Account {
     let addr = signer::address_of(account);
-    assert!(exists_at(addr), error::not_found(EACCOUNT_DOES_NOT_EXIST));
+    ensure_resource_exists(addr);
     assert!(
-        vector::length(&new_auth_key) == 32,
+        new_auth_key.length() == 32,
         error::invalid_argument(EMALFORMED_AUTHENTICATION_KEY)
     );
-    let account_resource = borrow_global_mut<Account>(addr);
+    check_rotation_permission(account);
+    let account_resource = &mut Account[addr];
     account_resource.authentication_key = new_auth_key;
 }
 
@@ -1195,6 +1673,9 @@ does not come with a proof-of-knowledge of the underlying SK. Nonetheless, we need this functionality due to the introduction of non-standard key algorithms, such as passkeys, which cannot produce proofs-of-knowledge in the format expected in rotate_authentication_key. +If you'd like to follow up by updating the OriginatingAddress table, you can call +set_originating_address(). +
entry fun rotate_authentication_key_call(account: &signer, new_auth_key: vector<u8>)
 
@@ -1212,6 +1693,170 @@ the format expected in rotate_authentication_key. + + + + +## Function `rotate_authentication_key_from_public_key` + +Private entry function for key rotation that allows the signer to update their authentication key from a given public key. +This function will abort if the scheme is not recognized or if new_public_key_bytes is not a valid public key for the given scheme. + +Note: This function does not update the OriginatingAddress table. + + +
entry fun rotate_authentication_key_from_public_key(account: &signer, scheme: u8, new_public_key_bytes: vector<u8>)
+
+ + + +
+Implementation + + +
entry fun rotate_authentication_key_from_public_key(account: &signer, scheme: u8, new_public_key_bytes: vector<u8>) acquires Account {
+    let addr = signer::address_of(account);
+    let account_resource = &Account[addr];
+    let old_auth_key = account_resource.authentication_key;
+    let new_auth_key;
+    if (scheme == ED25519_SCHEME) {
+        let from_pk = ed25519::new_unvalidated_public_key_from_bytes(new_public_key_bytes);
+        new_auth_key = ed25519::unvalidated_public_key_to_authentication_key(&from_pk);
+    } else if (scheme == MULTI_ED25519_SCHEME) {
+        let from_pk = multi_ed25519::new_unvalidated_public_key_from_bytes(new_public_key_bytes);
+        new_auth_key = multi_ed25519::unvalidated_public_key_to_authentication_key(&from_pk);
+    } else if (scheme == SINGLE_KEY_SCHEME) {
+        new_auth_key = single_key::new_public_key_from_bytes(new_public_key_bytes).to_authentication_key();
+    } else if (scheme == MULTI_KEY_SCHEME) {
+        new_auth_key = multi_key::new_public_key_from_bytes(new_public_key_bytes).to_authentication_key();
+    } else {
+        abort error::invalid_argument(EUNRECOGNIZED_SCHEME)
+    };
+    rotate_authentication_key_call(account, new_auth_key);
+    event::emit(KeyRotationToPublicKey {
+        account: addr,
+        // Set verified_public_key_bit_map to [0x00, 0x00, 0x00, 0x00] as the public key(s) are not verified
+        verified_public_key_bit_map: vector[0x00, 0x00, 0x00, 0x00],
+        public_key_scheme: scheme,
+        public_key: new_public_key_bytes,
+        old_auth_key,
+        new_auth_key,
+    });
+}
+
+ + + +
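For illustration (a sketch, not part of this diff; module name and address are hypothetical): the authentication key that the function above derives for scheme 0 (ED25519_SCHEME) can be recomputed from the raw public-key bytes, which is useful for predicting the post-rotation auth key.

module 0xcafe::auth_key_example {
    use 0x1::ed25519;

    /// Hypothetical helper: the auth key that
    /// rotate_authentication_key_from_public_key(account, 0, pk_bytes) would set.
    public fun expected_ed25519_auth_key(pk_bytes: vector<u8>): vector<u8> {
        let pk = ed25519::new_unvalidated_public_key_from_bytes(pk_bytes);
        ed25519::unvalidated_public_key_to_authentication_key(&pk)
    }
}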
+ + + +## Function `upsert_ed25519_backup_key_on_keyless_account` + +Upserts an ED25519 backup key onto an account whose original public key is a keyless public key, by rotating the account's authentication key +to a 1-of-2 multi-key of the original keyless public key and the new backup key, so that a signature from either key authenticates. +This function takes the account's original keyless public key and an ED25519 backup public key, and derives the new multi-key authentication key from the two. + +Note: This function emits a KeyRotationToPublicKey event marking both keys as verified, since the keyless public key +is the original public key of the account and the new backup key has been validated by verifying the challenge it signed. + + + + +### Arguments + +* account - The signer representing the keyless account +* keyless_public_key - The original keyless public key of the account (wrapped in an AnyPublicKey) +* backup_public_key - The ED25519 public key to add as a backup +* backup_key_proof - A signature from the backup key proving ownership + + + + +### Aborts + +* If any of the inputs deserialize incorrectly +* If the provided public key is not a keyless public key +* If the keyless public key is not the original public key of the account +* If the backup key proof signature is invalid + + + + +### Events + +* Emits a KeyRotationToPublicKey event with the new multi-key configuration + +
entry fun upsert_ed25519_backup_key_on_keyless_account(account: &signer, keyless_public_key: vector<u8>, backup_public_key: vector<u8>, backup_key_proof: vector<u8>)
+
+ + + +
+Implementation + + +
entry fun upsert_ed25519_backup_key_on_keyless_account(account: &signer, keyless_public_key: vector<u8>, backup_public_key: vector<u8>, backup_key_proof: vector<u8>) acquires Account {
+    // Check that the provided public key is a keyless public key
+    let keyless_single_key = single_key::new_public_key_from_bytes(keyless_public_key);
+    assert!(single_key::is_keyless_or_federated_keyless_public_key(&keyless_single_key), error::invalid_argument(ENOT_A_KEYLESS_PUBLIC_KEY));
+
+    let addr = signer::address_of(account);
+    let account_resource = &mut Account[addr];
+    let old_auth_key = account_resource.authentication_key;
+
+    // Check that the provided public key is original public key of the account by comparing
+    // its authentication key to the account address.
+    assert!(
+        bcs::to_bytes(&addr) == keyless_single_key.to_authentication_key(),
+        error::invalid_argument(ENOT_THE_ORIGINAL_PUBLIC_KEY)
+    );
+
+    let curr_auth_key_as_address = from_bcs::to_address(old_auth_key);
+    let challenge = RotationProofChallenge {
+        sequence_number: account_resource.sequence_number,
+        originator: addr,
+        current_auth_key: curr_auth_key_as_address,
+        new_public_key: backup_public_key,
+    };
+
+    // Assert that the challenge signed by the provided backup key is valid
+    assert_valid_rotation_proof_signature_and_get_auth_key(
+        ED25519_SCHEME,
+        backup_public_key,
+        backup_key_proof,
+        &challenge
+    );
+
+    // Get the backup key as a single key
+    let backup_key_ed25519 = ed25519::new_unvalidated_public_key_from_bytes(backup_public_key);
+    let backup_key_as_single_key = single_key::from_ed25519_public_key_unvalidated(backup_key_ed25519);
+
+    let new_public_key = multi_key::new_multi_key_from_single_keys(vector[keyless_single_key, backup_key_as_single_key], 1);
+    let new_auth_key = new_public_key.to_authentication_key();
+
+    // Rotate the authentication key to the new multi key public key
+    rotate_authentication_key_call(account, new_auth_key);
+
+    event::emit(KeyRotationToPublicKey {
+        account: addr,
+        // This marks that both the keyless public key and the new backup key are verified
+        // The keyless public key is the original public key of the account and the new backup key
+        // has been validated via verifying the challenge signed by the new backup key.
+        // Represents the bitmap 0b11000000000000000000000000000000
+        verified_public_key_bit_map: vector[0xC0, 0x00, 0x00, 0x00],
+        public_key_scheme: MULTI_KEY_SCHEME,
+        public_key: bcs::to_bytes(&new_public_key),
+        old_auth_key,
+        new_auth_key,
+    });
+}
+
+ + +
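For reference, a sketch mirroring the derivation performed in the body above (not part of this diff; the 0xcafe module is hypothetical): the resulting 1-of-2 multi-key auth key can be recomputed off-chain from the two public keys.

module 0xcafe::multi_key_example {
    use 0x1::ed25519;
    use 0x1::multi_key;
    use 0x1::single_key;

    /// Hypothetical helper: auth key of the 1-of-2 multi-key formed from a
    /// keyless public key and an Ed25519 backup key, as the function above does.
    public fun one_of_two_auth_key(
        keyless_pk_bytes: vector<u8>,
        backup_pk_bytes: vector<u8>
    ): vector<u8> {
        let keyless_key = single_key::new_public_key_from_bytes(keyless_pk_bytes);
        let backup_ed = ed25519::new_unvalidated_public_key_from_bytes(backup_pk_bytes);
        let backup_key = single_key::from_ed25519_public_key_unvalidated(backup_ed);
        let multi = multi_key::new_multi_key_from_single_keys(
            vector[keyless_key, backup_key], 1
        );
        multi.to_authentication_key()
    }
}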
@@ -1233,7 +1878,7 @@ A scheme of 0 refers to an Ed25519 key and a scheme of 1 refers to Multi-Ed25519 Here is an example attack if we don't ask for the second signature cap_update_table: Alice has rotated her account addr_a to new_addr_a. As a result, the following entry is created, to help Alice when recovering her wallet: OriginatingAddress[new_addr_a] -> addr_a -Alice has had bad day: her laptop blew up and she needs to reset her account on a new one. +Alice has had a bad day: her laptop blew up and she needs to reset her account on a new one. (Fortunately, she still has her secret key new_sk_a associated with her new address new_addr_a, so she can do this.) But Bob likes to mess with Alice. @@ -1267,9 +1912,10 @@ to rotate his address to Alice's address in the first place. cap_update_table: vector<u8>, ) acquires Account, OriginatingAddress { let addr = signer::address_of(account); - assert!(exists_at(addr), error::not_found(EACCOUNT_DOES_NOT_EXIST)); - let account_resource = borrow_global_mut<Account>(addr); - + ensure_resource_exists(addr); + check_rotation_permission(account); + let account_resource = &mut Account[addr]; + let old_auth_key = account_resource.authentication_key; // Verify the given `from_public_key_bytes` matches this account's current authentication key. if (from_scheme == ED25519_SCHEME) { let from_pk = ed25519::new_unvalidated_public_key_from_bytes(from_public_key_bytes); @@ -1314,6 +1960,25 @@ to rotate his address to Alice's address in the first place. // Update the `OriginatingAddress` table. update_auth_key_and_originating_address_table(addr, account_resource, new_auth_key); + + let verified_public_key_bit_map; + if (to_scheme == ED25519_SCHEME) { + // Set verified_public_key_bit_map to [0x80, 0x00, 0x00, 0x00] as the public key is verified and there is only one public key. + verified_public_key_bit_map = vector[0x80, 0x00, 0x00, 0x00]; + } else { + // The new key is a multi-ed25519 key, so set the verified_public_key_bit_map to the signature bitmap. + let len = vector::length(&cap_update_table); + verified_public_key_bit_map = vector::slice(&cap_update_table, len - 4, len); + }; + + event::emit(KeyRotationToPublicKey { + account: addr, + verified_public_key_bit_map, + public_key_scheme: to_scheme, + public_key: to_public_key_bytes, + old_auth_key, + new_auth_key, + }); }
@@ -1343,13 +2008,15 @@ to rotate his address to Alice's address in the first place. new_public_key_bytes: vector<u8>, cap_update_table: vector<u8> ) acquires Account, OriginatingAddress { - assert!(exists_at(rotation_cap_offerer_address), error::not_found(EOFFERER_ADDRESS_DOES_NOT_EXIST)); + check_rotation_permission(delegate_signer); + assert!(resource_exists_at(rotation_cap_offerer_address), error::not_found(EOFFERER_ADDRESS_DOES_NOT_EXIST)); - // Check that there exists a rotation capability offer at the offerer's account resource for the delegate. + // Check that there exists a rotation capability offer at the offerer's account resource for the delegate. let delegate_address = signer::address_of(delegate_signer); - let offerer_account_resource = borrow_global<Account>(rotation_cap_offerer_address); + let offerer_account_resource = &Account[rotation_cap_offerer_address]; + let old_auth_key = offerer_account_resource.authentication_key; assert!( - option::contains(&offerer_account_resource.rotation_capability_offer.for, &delegate_address), + offerer_account_resource.rotation_capability_offer.for.contains(&delegate_address), error::not_found(ENO_SUCH_ROTATION_CAPABILITY_OFFER) ); @@ -1370,12 +2037,31 @@ to rotate his address to Alice's address in the first place. ); // Update the `OriginatingAddress` table, so we can find the originating address using the new address. - let offerer_account_resource = borrow_global_mut<Account>(rotation_cap_offerer_address); + let offerer_account_resource = &mut Account[rotation_cap_offerer_address]; update_auth_key_and_originating_address_table( rotation_cap_offerer_address, offerer_account_resource, new_auth_key ); + + let verified_public_key_bit_map; + if (new_scheme == ED25519_SCHEME) { + // Set verified_public_key_bit_map to [0x80, 0x00, 0x00, 0x00] as the public key is verified and there is only one public key. + verified_public_key_bit_map = vector[0x80, 0x00, 0x00, 0x00]; + } else { + // The new key is a multi-ed25519 key, so set the verified_public_key_bit_map to the signature bitmap. + let len = vector::length(&cap_update_table); + verified_public_key_bit_map = vector::slice(&cap_update_table, len - 4, len); + }; + + event::emit(KeyRotationToPublicKey { + account: rotation_cap_offerer_address, + verified_public_key_bit_map, + public_key_scheme: new_scheme, + public_key: new_public_key_bytes, + old_auth_key, + new_auth_key, + }); }
@@ -1422,11 +2108,13 @@ offer, calling this function will replace the previous recipient_addressvector<u8>, recipient_address: address, ) acquires Account { + check_rotation_permission(account); let addr = signer::address_of(account); + ensure_resource_exists(addr); assert!(exists_at(recipient_address), error::not_found(EACCOUNT_DOES_NOT_EXIST)); - // proof that this account intends to delegate its rotation capability to another account - let account_resource = borrow_global_mut<Account>(addr); + // proof that this account intends to delegate its rotation capability to another account + let account_resource = &mut Account[addr]; let proof_challenge = RotationCapabilityOfferProofChallengeV2 { chain_id: chain_id::get(), sequence_number: account_resource.sequence_number, @@ -1465,8 +2153,58 @@ offer, calling this function will replace the previous recipient_addressabort error::invalid_argument(EINVALID_SCHEME) }; - // update the existing rotation capability offer or put in a new rotation capability offer for the current account - option::swap_or_fill(&mut account_resource.rotation_capability_offer.for, recipient_address); + // update the existing rotation capability offer or put in a new rotation capability offer for the current account + account_resource.rotation_capability_offer.for.swap_or_fill(recipient_address); +} +
+ + + + + + + +## Function `set_originating_address` + +For the given account, add an entry to the OriginatingAddress table mapping the account's +authentication key to the account's address. + +Can be used as a follow-up to rotate_authentication_key_call() to reconcile the +OriginatingAddress table, or to establish a mapping for a new account that has not yet had +its authentication key rotated. + +Aborts if there is already an entry in the OriginatingAddress table for the account's +authentication key. + +Kept as a private entry function to ensure that after an unproven rotation via +rotate_authentication_key_call(), the OriginatingAddress table is only updated under the +authority of the new authentication key. + +
entry fun set_originating_address(account: &signer)
+
+ + + +
+Implementation + + +
entry fun set_originating_address(account: &signer) acquires Account, OriginatingAddress {
+    let account_addr = signer::address_of(account);
+    assert!(exists<Account>(account_addr), error::not_found(EACCOUNT_DOES_NOT_EXIST));
+    let auth_key_as_address =
+        from_bcs::to_address(Account[account_addr].authentication_key);
+    let address_map_ref_mut =
+        &mut OriginatingAddress[@aptos_framework].address_map;
+    if (address_map_ref_mut.contains(auth_key_as_address)) {
+        assert!(
+            *address_map_ref_mut.borrow(auth_key_as_address) == account_addr,
+            error::invalid_argument(ENEW_AUTH_KEY_ALREADY_MAPPED)
+        );
+    } else {
+        address_map_ref_mut.add(auth_key_as_address, account_addr);
+    };
 }
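For intuition (a sketch, not in this diff; module name and address are hypothetical): the table key that set_originating_address adds or checks is simply the account's current authentication key reinterpreted as an address.

module 0xcafe::originating_key_example {
    use 0x1::account;
    use 0x1::from_bcs;

    /// Hypothetical helper: the OriginatingAddress key under which `addr`
    /// would be recorded by set_originating_address.
    public fun table_key_for(addr: address): address {
        from_bcs::to_address(account::get_authentication_key(addr))
    }
}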
 
@@ -1492,8 +2230,15 @@ Returns true if the account at account_addr has a rotation capabili
public fun is_rotation_capability_offered(account_addr: address): bool acquires Account {
-    let account_resource = borrow_global<Account>(account_addr);
-    option::is_some(&account_resource.rotation_capability_offer.for)
+    if (features::is_default_account_resource_enabled()) {
+        if (!resource_exists_at(account_addr)) {
+            return false;
+        }
+    } else {
+        assert!(exists_at(account_addr), error::not_found(EACCOUNT_DOES_NOT_EXIST));
+    };
+    let account_resource = &Account[account_addr];
+    account_resource.rotation_capability_offer.for.is_some()
 }
 
@@ -1519,12 +2264,13 @@ Returns the address of the account that has a rotation capability offer from the
public fun get_rotation_capability_offer_for(account_addr: address): address acquires Account {
-    let account_resource = borrow_global<Account>(account_addr);
+    assert_account_resource_with_error(account_addr, ENO_SUCH_ROTATION_CAPABILITY_OFFER);
+    let account_resource = &Account[account_addr];
     assert!(
-        option::is_some(&account_resource.rotation_capability_offer.for),
+        account_resource.rotation_capability_offer.for.is_some(),
         error::not_found(ENO_SIGNER_CAPABILITY_OFFERED),
     );
-    *option::borrow(&account_resource.rotation_capability_offer.for)
+    *account_resource.rotation_capability_offer.for.borrow()
 }
 
@@ -1550,10 +2296,12 @@ Revoke the rotation capability offer given to to_be_revoked_recipient_addr
public entry fun revoke_rotation_capability(account: &signer, to_be_revoked_address: address) acquires Account {
     assert!(exists_at(to_be_revoked_address), error::not_found(EACCOUNT_DOES_NOT_EXIST));
+    check_rotation_permission(account);
     let addr = signer::address_of(account);
-    let account_resource = borrow_global_mut<Account>(addr);
+    assert_account_resource_with_error(addr, ENO_SUCH_ROTATION_CAPABILITY_OFFER);
+    let account_resource = &Account[addr];
     assert!(
-        option::contains(&account_resource.rotation_capability_offer.for, &to_be_revoked_address),
+        account_resource.rotation_capability_offer.for.contains(&to_be_revoked_address),
         error::not_found(ENO_SUCH_ROTATION_CAPABILITY_OFFER)
     );
     revoke_any_rotation_capability(account);
@@ -1581,8 +2329,11 @@ Revoke any rotation capability offer in the specified account.
 
 
 
public entry fun revoke_any_rotation_capability(account: &signer) acquires Account {
-    let account_resource = borrow_global_mut<Account>(signer::address_of(account));
-    option::extract(&mut account_resource.rotation_capability_offer.for);
+    check_rotation_permission(account);
+    let offerer_addr = signer::address_of(account);
+    assert_account_resource_with_error(offerer_addr, ENO_SUCH_ROTATION_CAPABILITY_OFFER);
+    let account_resource = &mut Account[signer::address_of(account)];
+    account_resource.rotation_capability_offer.for.extract();
 }
 
@@ -1621,10 +2372,12 @@ to the account owner's signer capability). account_public_key_bytes: vector<u8>, recipient_address: address ) acquires Account { + check_offering_permission(account); let source_address = signer::address_of(account); + ensure_resource_exists(source_address); assert!(exists_at(recipient_address), error::not_found(EACCOUNT_DOES_NOT_EXIST)); - // Proof that this account intends to delegate its signer capability to another account. + // Proof that this account intends to delegate its signer capability to another account. let proof_challenge = SignerCapabilityOfferProofChallengeV2 { sequence_number: get_sequence_number(source_address), source_address, @@ -1633,9 +2386,9 @@ to the account owner's signer capability). verify_signed_message( source_address, account_scheme, account_public_key_bytes, signer_capability_sig_bytes, proof_challenge); - // Update the existing signer capability offer or put in a new signer capability offer for the recipient. - let account_resource = borrow_global_mut<Account>(source_address); - option::swap_or_fill(&mut account_resource.signer_capability_offer.for, recipient_address); + // Update the existing signer capability offer or put in a new signer capability offer for the recipient. + let account_resource = &mut Account[source_address]; + account_resource.signer_capability_offer.for.swap_or_fill(recipient_address); }
@@ -1661,8 +2414,15 @@ Returns true if the account at account_addr has a signer capability
public fun is_signer_capability_offered(account_addr: address): bool acquires Account {
-    let account_resource = borrow_global<Account>(account_addr);
-    option::is_some(&account_resource.signer_capability_offer.for)
+    if (features::is_default_account_resource_enabled()) {
+        if (!resource_exists_at(account_addr)) {
+            return false;
+        }
+    } else {
+        assert!(exists_at(account_addr), error::not_found(EACCOUNT_DOES_NOT_EXIST));
+    };
+    let account_resource = &Account[account_addr];
+    account_resource.signer_capability_offer.for.is_some()
 }
 
@@ -1688,12 +2448,13 @@ Returns the address of the account that has a signer capability offer from the a
public fun get_signer_capability_offer_for(account_addr: address): address acquires Account {
-    let account_resource = borrow_global<Account>(account_addr);
+    assert_account_resource_with_error(account_addr, ENO_SIGNER_CAPABILITY_OFFERED);
+    let account_resource = &Account[account_addr];
     assert!(
-        option::is_some(&account_resource.signer_capability_offer.for),
+        account_resource.signer_capability_offer.for.is_some(),
         error::not_found(ENO_SIGNER_CAPABILITY_OFFERED),
     );
-    *option::borrow(&account_resource.signer_capability_offer.for)
+    *account_resource.signer_capability_offer.for.borrow()
 }
 
@@ -1719,11 +2480,13 @@ has a signer capability offer from accoun
public entry fun revoke_signer_capability(account: &signer, to_be_revoked_address: address) acquires Account {
+    check_offering_permission(account);
     assert!(exists_at(to_be_revoked_address), error::not_found(EACCOUNT_DOES_NOT_EXIST));
     let addr = signer::address_of(account);
-    let account_resource = borrow_global_mut<Account>(addr);
+    assert_account_resource_with_error(addr, ENO_SUCH_SIGNER_CAPABILITY);
+    let account_resource = &Account[addr];
     assert!(
-        option::contains(&account_resource.signer_capability_offer.for, &to_be_revoked_address),
+        account_resource.signer_capability_offer.for.contains(&to_be_revoked_address),
         error::not_found(ENO_SUCH_SIGNER_CAPABILITY)
     );
     revoke_any_signer_capability(account);
@@ -1751,8 +2514,11 @@ Revoke any signer capability offer in the specified account.
 
 
 
public entry fun revoke_any_signer_capability(account: &signer) acquires Account {
-    let account_resource = borrow_global_mut<Account>(signer::address_of(account));
-    option::extract(&mut account_resource.signer_capability_offer.for);
+    check_offering_permission(account);
+    let offerer_addr = signer::address_of(account);
+    assert_account_resource_with_error(offerer_addr, ENO_SUCH_SIGNER_CAPABILITY);
+    let account_resource = &mut Account[signer::address_of(account)];
+    account_resource.signer_capability_offer.for.extract();
 }
 
@@ -1778,13 +2544,13 @@ at the offerer's address.
public fun create_authorized_signer(account: &signer, offerer_address: address): signer acquires Account {
-    assert!(exists_at(offerer_address), error::not_found(EOFFERER_ADDRESS_DOES_NOT_EXIST));
-
-    // Check if there's an existing signer capability offer from the offerer.
-    let account_resource = borrow_global<Account>(offerer_address);
+    check_offering_permission(account);
+    assert_account_resource_with_error(offerer_address, ENO_SUCH_SIGNER_CAPABILITY);
+    // Check if there's an existing signer capability offer from the offerer.
+    let account_resource = &Account[offerer_address];
     let addr = signer::address_of(account);
     assert!(
-        option::contains(&account_resource.signer_capability_offer.for, &addr),
+        account_resource.signer_capability_offer.for.contains(&addr),
         error::not_found(ENO_SUCH_SIGNER_CAPABILITY)
     );
 
@@ -1794,6 +2560,37 @@ at the offerer's address.
 
 
 
+
+ + + +## Function `assert_account_resource_with_error` + + + +
fun assert_account_resource_with_error(account: address, error_code: u64)
+
+ + + +
+Implementation + + +
inline fun assert_account_resource_with_error(account: address, error_code: u64) {
+    if (features::is_default_account_resource_enabled()) {
+        assert!(
+            resource_exists_at(account),
+            error::not_found(error_code),
+        );
+    } else {
+        assert!(exists_at(account), error::not_found(EACCOUNT_DOES_NOT_EXIST));
+    };
+}
+
+ + +
@@ -1866,13 +2663,18 @@ in the event of key recovery. account_resource: &mut Account, new_auth_key_vector: vector<u8>, ) acquires OriginatingAddress { - let address_map = &mut borrow_global_mut<OriginatingAddress>(@aptos_framework).address_map; + let address_map = &mut OriginatingAddress[@aptos_framework].address_map; let curr_auth_key = from_bcs::to_address(account_resource.authentication_key); + let new_auth_key = from_bcs::to_address(new_auth_key_vector); + assert!( + new_auth_key != curr_auth_key, + error::invalid_argument(ENEW_AUTH_KEY_SAME_AS_CURRENT) + ); // Checks `OriginatingAddress[curr_auth_key]` is either unmapped, or mapped to `originating_address`. // If it's mapped to the originating address, removes that mapping. // Otherwise, abort if it's mapped to a different address. - if (table::contains(address_map, curr_auth_key)) { + if (address_map.contains(curr_auth_key)) { // If account_a with address_a is rotating its keypair from keypair_a to keypair_b, we expect // the address of the account to stay the same, while its keypair updates to keypair_b. // Here, by asserting that we're calling from the account with the originating address, we enforce @@ -1883,14 +2685,17 @@ in the event of key recovery. // If the account with address b calls this function with two valid signatures, it will abort at this step, // because address b is not the account's originating address. assert!( - originating_addr == table::remove(address_map, curr_auth_key), + originating_addr == address_map.remove(curr_auth_key), error::not_found(EINVALID_ORIGINATING_ADDRESS) ); }; // Set `OriginatingAddress[new_auth_key] = originating_address`. - let new_auth_key = from_bcs::to_address(new_auth_key_vector); - table::add(address_map, new_auth_key, originating_addr); + assert!( + !address_map.contains(new_auth_key), + error::invalid_argument(ENEW_AUTH_KEY_ALREADY_MAPPED) + ); + address_map.add(new_auth_key, originating_addr); if (std::features::module_event_migration_enabled()) { event::emit(KeyRotation { @@ -1898,14 +2703,15 @@ in the event of key recovery. old_authentication_key: account_resource.authentication_key, new_authentication_key: new_auth_key_vector, }); + } else { + event::emit_event<KeyRotationEvent>( + &mut account_resource.key_rotation_events, + KeyRotationEvent { + old_authentication_key: account_resource.authentication_key, + new_authentication_key: new_auth_key_vector, + } + ); }; - event::emit_event<KeyRotationEvent>( - &mut account_resource.key_rotation_events, - KeyRotationEvent { - old_authentication_key: account_resource.authentication_key, - new_authentication_key: new_auth_key_vector, - } - ); // Update the account resource's authentication key. account_resource.authentication_key = new_auth_key_vector; @@ -1936,8 +2742,8 @@ involves the use of a cryptographic hash operation and should be use thoughtfull
public fun create_resource_address(source: &address, seed: vector<u8>): address {
     let bytes = bcs::to_bytes(source);
-    vector::append(&mut bytes, seed);
-    vector::push_back(&mut bytes, DERIVE_RESOURCE_ACCOUNT_SCHEME);
+    bytes.append(seed);
+    bytes.push_back(DERIVE_RESOURCE_ACCOUNT_SCHEME);
     from_bcs::to_address(hash::sha3_256(bytes))
 }
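
Because the derivation above is a pure hash over `(source, seed)`, the same inputs always yield the same address and different seeds yield different addresses. A minimal sketch, written as if it were a `#[test]` inside `0x1::account` (the address and seeds are illustrative):

```move
#[test]
fun resource_address_is_deterministic() {
    let source = @0xcafe;
    // Same (source, seed) -> same derived address.
    assert!(create_resource_address(&source, b"seed-1") == create_resource_address(&source, b"seed-1"), 0);
    // Different seed -> different derived address (with overwhelming probability).
    assert!(create_resource_address(&source, b"seed-1") != create_resource_address(&source, b"seed-2"), 1);
}
```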
 
@@ -1973,13 +2779,15 @@ than (1/2)^(256).
public fun create_resource_account(source: &signer, seed: vector<u8>): (signer, SignerCapability) acquires Account {
     let resource_addr = create_resource_address(&signer::address_of(source), seed);
     let resource = if (exists_at(resource_addr)) {
-        let account = borrow_global<Account>(resource_addr);
+        if (resource_exists_at(resource_addr)) {
+        let account = &Account[resource_addr];
         assert!(
-            option::is_none(&account.signer_capability_offer.for),
+            account.signer_capability_offer.for.is_none(),
             error::already_exists(ERESOURCE_ACCCOUNT_EXISTS),
         );
+        };
         assert!(
-            account.sequence_number == 0,
+            get_sequence_number(resource_addr) == 0,
             error::invalid_state(EACCOUNT_ALREADY_USED),
         );
         create_signer(resource_addr)
@@ -1992,7 +2800,7 @@ than (1/2)^(256).
     // of the resource account using the SignerCapability.
     rotate_authentication_key_internal(&resource, ZERO_AUTH_KEY);
 
-    let account = borrow_global_mut<Account>(resource_addr);
+    let account = &mut Account[resource_addr];
     account.signer_capability_offer.for = option::some(resource_addr);
     let signer_cap = SignerCapability { account: resource_addr };
     (resource, signer_cap)
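
A common pattern on top of this function is a module that creates its resource account once and stores the returned `SignerCapability`, so it can later re-derive a signer for the resource account via `create_signer_with_capability`. A minimal sketch; the module address, names, and seed below are illustrative:

```move
module 0xabc::vault {
    use aptos_framework::account::{Self, SignerCapability};

    /// Stored under the resource account created in `init`.
    struct VaultSignerCap has key {
        signer_cap: SignerCapability,
    }

    /// Derives a resource account from (deployer address, seed) and keeps its capability.
    public entry fun init(deployer: &signer) {
        let (resource_signer, signer_cap) =
            account::create_resource_account(deployer, b"vault-seed");
        move_to(&resource_signer, VaultSignerCap { signer_cap });
    }
}
```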
@@ -2048,6 +2856,16 @@ create the account for system reserved addresses
 ## Function `create_guid`
 
 GUID management methods.
+Creates a new GUID for account_signer and increments the GUID creation number.
+
+When the default_account_resource feature flag is enabled:
+- If no Account resource exists, one will be created automatically
+- This ensures consistent GUID creation behavior for all addresses
+
+When the feature flag is disabled:
+- Aborts if no Account resource exists
+
+Aborts if the maximum number of GUIDs has been reached (0x4000000000000)
 
 
 
public fun create_guid(account_signer: &signer): guid::GUID
@@ -2061,7 +2879,8 @@ GUID management methods.
 
 
public fun create_guid(account_signer: &signer): guid::GUID acquires Account {
     let addr = signer::address_of(account_signer);
-    let account = borrow_global_mut<Account>(addr);
+    ensure_resource_exists(addr);
+    let account = &mut Account[addr];
     let guid = guid::create(addr, &mut account.guid_creation_num);
     assert!(
         account.guid_creation_num < MAX_GUID_CREATION_NUM,
@@ -2079,7 +2898,10 @@ GUID management methods.
 
 ## Function `new_event_handle`
 
-GUID management methods.
+Creates a new event handle for account.
+
+This is a wrapper around create_guid that creates an EventHandle,
+inheriting the same behavior regarding account existence and feature flags.
 
 
 
public fun new_event_handle<T: drop, store>(account: &signer): event::EventHandle<T>
@@ -2117,13 +2939,23 @@ Coin management methods.
 
 
 
public(friend) fun register_coin<CoinType>(account_addr: address) acquires Account {
-    let account = borrow_global_mut<Account>(account_addr);
-    event::emit_event<CoinRegisterEvent>(
-        &mut account.coin_register_events,
-        CoinRegisterEvent {
-            type_info: type_info::type_of<CoinType>(),
-        },
-    );
+    if (std::features::module_event_migration_enabled()) {
+        event::emit(
+            CoinRegister {
+                account: account_addr,
+                type_info: type_info::type_of<CoinType>(),
+            },
+        );
+    } else {
+        ensure_resource_exists(account_addr);
+        let account = &mut Account[account_addr];
+        event::emit_event<CoinRegisterEvent>(
+            &mut account.coin_register_events,
+            CoinRegisterEvent {
+                type_info: type_info::type_of<CoinType>(),
+            },
+        );
+    }
 }
 
@@ -2138,7 +2970,7 @@ Coin management methods. Capability based functions for efficient use. -
public fun create_signer_with_capability(capability: &account::SignerCapability): signer
+
public fun create_signer_with_capability(capability: &account::SignerCapability): signer
 
@@ -2147,8 +2979,8 @@ Capability based functions for efficient use. Implementation -
public fun create_signer_with_capability(capability: &SignerCapability): signer {
-    let addr = &capability.account;
+
public fun create_signer_with_capability(capability: &SignerCapability): signer {
+    let addr = &capability.account;
     create_signer(*addr)
 }
 
@@ -2163,7 +2995,7 @@ Capability based functions for efficient use. -
public fun get_signer_capability_address(capability: &account::SignerCapability): address
+
public fun get_signer_capability_address(capability: &account::SignerCapability): address
 
@@ -2172,8 +3004,8 @@ Capability based functions for efficient use. Implementation -
public fun get_signer_capability_address(capability: &SignerCapability): address {
-    capability.account
+
public fun get_signer_capability_address(capability: &SignerCapability): address {
+    capability.account
 }
 
@@ -2203,13 +3035,13 @@ Capability based functions for efficient use. signed_message_bytes: vector<u8>, message: T, ) acquires Account { - let account_resource = borrow_global_mut<Account>(account); + let auth_key = get_authentication_key(account); // Verify that the `SignerCapabilityOfferProofChallengeV2` has the right information and is signed by the account owner's key if (account_scheme == ED25519_SCHEME) { let pubkey = ed25519::new_unvalidated_public_key_from_bytes(account_public_key); let expected_auth_key = ed25519::unvalidated_public_key_to_authentication_key(&pubkey); assert!( - account_resource.authentication_key == expected_auth_key, + auth_key == expected_auth_key, error::invalid_argument(EWRONG_CURRENT_PUBLIC_KEY), ); @@ -2222,7 +3054,7 @@ Capability based functions for efficient use. let pubkey = multi_ed25519::new_unvalidated_public_key_from_bytes(account_public_key); let expected_auth_key = multi_ed25519::unvalidated_public_key_to_authentication_key(&pubkey); assert!( - account_resource.authentication_key == expected_auth_key, + auth_key == expected_auth_key, error::invalid_argument(EWRONG_CURRENT_PUBLIC_KEY), ); @@ -2241,7 +3073,7 @@ Capability based functions for efficient use. - + ## Specification @@ -2363,13 +3195,12 @@ Capability based functions for efficient use. ### Module-level Specification -
pragma verify = true;
-pragma aborts_if_is_strict;
+
pragma verify = false;
 
- + ### Function `initialize` @@ -2390,7 +3221,7 @@ OriginatingAddress does not exist under @aptos_framework before the - + ### Function `create_account_if_does_not_exist` @@ -2403,6 +3234,7 @@ Ensure that the account exists at the end of the call.
let authentication_key = bcs::to_bytes(account_address);
+modifies global<Account>(account_address);
 aborts_if !exists<Account>(account_address) && (
     account_address == @vm_reserved
     || account_address == @aptos_framework
@@ -2414,7 +3246,7 @@ Ensure that the account exists at the end of the call.
 
 
 
-
+
 
 ### Function `create_account`
 
@@ -2437,7 +3269,7 @@ Limit the new account address is not @vm_reserved / @aptos_framework / @aptos_to
 
 
 
-
+
 
 ### Function `create_account_unchecked`
 
@@ -2450,14 +3282,16 @@ Check if the bytes of the new address is 32.
 The Account does not exist under the new address before creating the account.
 
 
-
include CreateAccountAbortsIf {addr: new_address};
+
pragma opaque;
+include CreateAccountAbortsIf {addr: new_address};
+modifies global<Account>(new_address);
 ensures signer::address_of(result) == new_address;
 ensures exists<Account>(new_address);
 
- + ### Function `exists_at` @@ -2469,8 +3303,23 @@ The Account does not exist under the new address before creating the account. -
// This enforces high-level requirement 3:
+
pragma opaque;
+// This enforces high-level requirement 3:
 aborts_if false;
+ensures result == spec_exists_at(addr);
+
+ + + + + + + +
fun spec_exists_at(addr: address): bool {
+   use std::features;
+   use std::features::DEFAULT_ACCOUNT_RESOURCE;
+   features::spec_is_enabled(DEFAULT_ACCOUNT_RESOURCE) || exists<Account>(addr)
+}
 
@@ -2490,7 +3339,7 @@ The Account does not exist under the new address before creating the account. - + ### Function `get_guid_next_creation_num` @@ -2508,7 +3357,7 @@ The Account does not exist under the new address before creating the account. - + ### Function `get_sequence_number` @@ -2526,7 +3375,24 @@ The Account does not exist under the new address before creating the account. - + + +### Function `originating_address` + + +
#[view]
+public fun originating_address(auth_key: address): option::Option<address>
+
+ + + + +
pragma verify=false;
+
+ + + + ### Function `increment_sequence_number` @@ -2550,7 +3416,7 @@ The sequence_number of the Account is up to MAX_U64. - + ### Function `get_authentication_key` @@ -2562,13 +3428,25 @@ The sequence_number of the Account is up to MAX_U64. -
aborts_if !exists<Account>(addr);
-ensures result == global<Account>(addr).authentication_key;
+
pragma opaque;
+aborts_if !exists<Account>(addr);
+ensures result == spec_get_authentication_key(addr);
 
- + + + + +
fun spec_get_authentication_key(addr: address): vector<u8> {
+   global<Account>(addr).authentication_key
+}
+
+ + + + ### Function `rotate_authentication_key_internal` @@ -2592,7 +3470,7 @@ The length of new_auth_key is 32. - + ### Function `rotate_authentication_key_call` @@ -2614,6 +3492,22 @@ The length of new_auth_key is 32. + + +### Function `rotate_authentication_key_from_public_key` + + +
entry fun rotate_authentication_key_from_public_key(account: &signer, scheme: u8, new_public_key_bytes: vector<u8>)
+
+ + + + +
aborts_if scheme != ED25519_SCHEME && scheme != MULTI_ED25519_SCHEME && scheme != SINGLE_KEY_SCHEME && scheme != MULTI_KEY_SCHEME;
+
+ + + @@ -2623,7 +3517,7 @@ The length of new_auth_key is 32. - + ### Function `rotate_authentication_key` @@ -2692,7 +3586,7 @@ The authentication scheme is ED25519_SCHEME and MULTI_ED25519_SCHEME - + ### Function `rotate_authentication_key_with_rotation_capability` @@ -2743,7 +3637,7 @@ The authentication scheme is ED25519_SCHEME and MULTI_ED25519_SCHEME - + ### Function `offer_rotation_capability` @@ -2797,7 +3691,23 @@ The authentication scheme is ED25519_SCHEME and MULTI_ED25519_SCHEME - + + +### Function `set_originating_address` + + +
entry fun set_originating_address(account: &signer)
+
+ + + + +
pragma verify=false;
+
+ + + + ### Function `is_rotation_capability_offered` @@ -2814,7 +3724,7 @@ The authentication scheme is ED25519_SCHEME and MULTI_ED25519_SCHEME - + ### Function `get_rotation_capability_offer_for` @@ -2833,7 +3743,7 @@ The authentication scheme is ED25519_SCHEME and MULTI_ED25519_SCHEME - + ### Function `revoke_rotation_capability` @@ -2857,7 +3767,7 @@ The authentication scheme is ED25519_SCHEME and MULTI_ED25519_SCHEME - + ### Function `revoke_any_rotation_capability` @@ -2880,7 +3790,7 @@ The authentication scheme is ED25519_SCHEME and MULTI_ED25519_SCHEME - + ### Function `offer_signer_capability` @@ -2934,7 +3844,7 @@ The authentication scheme is ED25519_SCHEME and MULTI_ED25519_SCHEME. - + ### Function `is_signer_capability_offered` @@ -2951,7 +3861,7 @@ The authentication scheme is ED25519_SCHEME and MULTI_ED25519_SCHEME. - + ### Function `get_signer_capability_offer_for` @@ -2970,7 +3880,7 @@ The authentication scheme is ED25519_SCHEME and MULTI_ED25519_SCHEME. - + ### Function `revoke_signer_capability` @@ -2994,7 +3904,7 @@ The value of signer_capability_offer.for of Account resource under the signer is - + ### Function `revoke_any_signer_capability` @@ -3014,7 +3924,7 @@ The value of signer_capability_offer.for of Account resource under the signer is - + ### Function `create_authorized_signer` @@ -3056,7 +3966,7 @@ The value of signer_capability_offer.for of Account resource under the signer is - + ### Function `assert_valid_rotation_proof_signature_and_get_auth_key` @@ -3103,7 +4013,7 @@ The value of signer_capability_offer.for of Account resource under the signer is - + ### Function `update_auth_key_and_originating_address_table` @@ -3136,6 +4046,7 @@ The value of signer_capability_offer.for of Account resource under the signer is aborts_if table::spec_contains(address_map, curr_auth_key) && table::spec_get(address_map, curr_auth_key) != originating_addr; aborts_if !from_bcs::deserializable<address>(new_auth_key_vector); + aborts_if curr_auth_key == new_auth_key; aborts_if curr_auth_key != new_auth_key && table::spec_contains(address_map, new_auth_key); ensures table::spec_contains(global<OriginatingAddress>(@aptos_framework).address_map, from_bcs::deserialize<address>(new_auth_key_vector)); } @@ -3143,7 +4054,7 @@ The value of signer_capability_offer.for of Account resource under the signer is - + ### Function `create_resource_address` @@ -3160,6 +4071,7 @@ The value of signer_capability_offer.for of Account resource under the signer is pragma aborts_if_is_strict = false; aborts_if [abstract] false; ensures [abstract] result == spec_create_resource_address(source, seed); +ensures [abstract] source != result;
@@ -3173,7 +4085,7 @@ The value of signer_capability_offer.for of Account resource under the signer is - + ### Function `create_resource_account` @@ -3187,8 +4099,8 @@ The value of signer_capability_offer.for of Account resource under the signer is
let source_addr = signer::address_of(source);
 let resource_addr = spec_create_resource_address(source_addr, seed);
 aborts_if len(ZERO_AUTH_KEY) != 32;
-include exists_at(resource_addr) ==> CreateResourceAccountAbortsIf;
-include !exists_at(resource_addr) ==> CreateAccountAbortsIf {addr: resource_addr};
+include spec_exists_at(resource_addr) ==> CreateResourceAccountAbortsIf;
+include !spec_exists_at(resource_addr) ==> CreateAccountAbortsIf {addr: resource_addr};
 ensures signer::address_of(result_1) == resource_addr;
 let post offer_for = global<Account>(resource_addr).signer_capability_offer.for;
 ensures option::spec_borrow(offer_for) == resource_addr;
@@ -3197,7 +4109,7 @@ The value of signer_capability_offer.for of Account resource under the signer is
 
 
 
-
+
 
 ### Function `create_framework_reserved_account`
 
@@ -3239,7 +4151,7 @@ The system reserved addresses is @0x1 / @0x2 / @0x3 / @0x4 / @0x5  / @0x6 / @0x7
 
 
 
-
+
 
 ### Function `create_guid`
 
@@ -3249,7 +4161,7 @@ The system reserved addresses is @0x1 / @0x2 / @0x3 / @0x4 / @0x5  / @0x6 / @0x7
 
 
 The Account existed under the signer.
-The guid_creation_num of the ccount resource is up to MAX_U64.
+The guid_creation_num of the account resource is up to MAX_U64.
 
 
 
let addr = signer::address_of(account_signer);
@@ -3263,7 +4175,7 @@ The guid_creation_num of the ccount resource is up to MAX_U64.
 
 
 
-
+
 
 ### Function `new_event_handle`
 
@@ -3297,7 +4209,7 @@ The guid_creation_num of the Account is up to MAX_U64.
 
 
 
-
+
 
 ### Function `register_coin`
 
@@ -3315,18 +4227,18 @@ The guid_creation_num of the Account is up to MAX_U64.
 
 
 
-
+
 
 ### Function `create_signer_with_capability`
 
 
-
public fun create_signer_with_capability(capability: &account::SignerCapability): signer
+
public fun create_signer_with_capability(capability: &account::SignerCapability): signer
 
-
let addr = capability.account;
+
let addr = capability.account;
 ensures signer::address_of(result) == addr;
 
@@ -3339,14 +4251,12 @@ The guid_creation_num of the Account is up to MAX_U64.
schema CreateResourceAccountAbortsIf {
     resource_addr: address;
     let account = global<Account>(resource_addr);
-    aborts_if len(account.signer_capability_offer.for.vec) != 0;
-    aborts_if account.sequence_number != 0;
 }
 
- + ### Function `verify_signed_message` diff --git a/aptos-move/framework/aptos-framework/doc/account_abstraction.md b/aptos-move/framework/aptos-framework/doc/account_abstraction.md new file mode 100644 index 0000000000000..b992c92c3c3c1 --- /dev/null +++ b/aptos-move/framework/aptos-framework/doc/account_abstraction.md @@ -0,0 +1,1011 @@ + + + +# Module `0x1::account_abstraction` + + + +- [Struct `UpdateDispatchableAuthenticator`](#0x1_account_abstraction_UpdateDispatchableAuthenticator) +- [Struct `RemoveDispatchableAuthenticator`](#0x1_account_abstraction_RemoveDispatchableAuthenticator) +- [Enum Resource `DispatchableAuthenticator`](#0x1_account_abstraction_DispatchableAuthenticator) +- [Enum `DerivableRegisterValue`](#0x1_account_abstraction_DerivableRegisterValue) +- [Enum Resource `DerivableDispatchableAuthenticator`](#0x1_account_abstraction_DerivableDispatchableAuthenticator) +- [Constants](#@Constants_0) +- [Function `using_dispatchable_authenticator`](#0x1_account_abstraction_using_dispatchable_authenticator) +- [Function `dispatchable_authenticator`](#0x1_account_abstraction_dispatchable_authenticator) +- [Function `derive_account_address_view`](#0x1_account_abstraction_derive_account_address_view) +- [Function `derive_account_address`](#0x1_account_abstraction_derive_account_address) +- [Function `add_authentication_function`](#0x1_account_abstraction_add_authentication_function) +- [Function `remove_authentication_function`](#0x1_account_abstraction_remove_authentication_function) +- [Function `remove_authenticator`](#0x1_account_abstraction_remove_authenticator) +- [Function `register_derivable_authentication_function`](#0x1_account_abstraction_register_derivable_authentication_function) +- [Function `initialize`](#0x1_account_abstraction_initialize) +- [Function `resource_addr`](#0x1_account_abstraction_resource_addr) +- [Function `update_dispatchable_authenticator_impl`](#0x1_account_abstraction_update_dispatchable_authenticator_impl) +- [Function `dispatchable_authenticator_internal`](#0x1_account_abstraction_dispatchable_authenticator_internal) +- [Function `dispatchable_derivable_authenticator_internal`](#0x1_account_abstraction_dispatchable_derivable_authenticator_internal) +- [Function `authenticate`](#0x1_account_abstraction_authenticate) +- [Function `dispatchable_authenticate`](#0x1_account_abstraction_dispatchable_authenticate) +- [Function `add_dispatchable_authentication_function`](#0x1_account_abstraction_add_dispatchable_authentication_function) +- [Function `remove_dispatchable_authentication_function`](#0x1_account_abstraction_remove_dispatchable_authentication_function) +- [Function `remove_dispatchable_authenticator`](#0x1_account_abstraction_remove_dispatchable_authenticator) +- [Specification](#@Specification_1) + - [Function `dispatchable_authenticate`](#@Specification_1_dispatchable_authenticate) + + +
use 0x1::auth_data;
+use 0x1::bcs;
+use 0x1::big_ordered_map;
+use 0x1::create_signer;
+use 0x1::error;
+use 0x1::event;
+use 0x1::features;
+use 0x1::from_bcs;
+use 0x1::function_info;
+use 0x1::hash;
+use 0x1::object;
+use 0x1::option;
+use 0x1::ordered_map;
+use 0x1::permissioned_signer;
+use 0x1::signer;
+use 0x1::string;
+use 0x1::system_addresses;
+use 0x1::vector;
+
+ + + + + +## Struct `UpdateDispatchableAuthenticator` + + + +
#[event]
+struct UpdateDispatchableAuthenticator has drop, store
+
+ + + +
+Fields + + +
+
+account: address +
+
+ +
+
+update: vector<u8> +
+
+ +
+
+auth_function: function_info::FunctionInfo +
+
+ +
+
+ + +
+ + + +## Struct `RemoveDispatchableAuthenticator` + + + +
#[event]
+struct RemoveDispatchableAuthenticator has drop, store
+
+ + + +
+Fields + + +
+
+account: address +
+
+ +
+
+ + +
+ + + +## Enum Resource `DispatchableAuthenticator` + +The dispatchable authenticator that defines how to authenticate this account in the specified module. +An integral part of Account Abstraction. + + +
#[resource_group_member(#[group = 0x1::object::ObjectGroup])]
+enum DispatchableAuthenticator has copy, drop, key
+
+ + + +
+Variants + + +
+V1 + + +
+Fields + + +
+
+auth_functions: ordered_map::OrderedMap<function_info::FunctionInfo, bool> +
+
+ +
+
+ + +
+ +
+ +
+ + + +## Enum `DerivableRegisterValue` + + + +
enum DerivableRegisterValue has store
+
+ + + +
+Variants + + +
+Empty + + +
+Fields + + +
+
+ + +
+ +
+ +
+ + + +## Enum Resource `DerivableDispatchableAuthenticator` + +The dispatchable derivable-scoped authenticator that defines how to authenticate + +
enum DerivableDispatchableAuthenticator has key
+
+ + + +
+Variants + + +
+V1 + + +
+Fields + + +
+
+auth_functions: big_ordered_map::BigOrderedMap<function_info::FunctionInfo, account_abstraction::DerivableRegisterValue> +
+
+ +
+
+ + +
+ +
+ +
+ + + +## Constants + + + + + + +
const MAX_U64: u128 = 18446744073709551615;
+
+ + + + + + + +
const ENOT_MASTER_SIGNER: u64 = 4;
+
+ + + + + +derivable_aa_account_address uses this for domain separation within its native implementation +source is defined in Scheme enum in types/src/transaction/authenticator.rs + + +
const DERIVABLE_ABSTRACTION_DERIVED_SCHEME: u8 = 5;
+
+ + + + + + + +
const EACCOUNT_ABSTRACTION_NOT_ENABLED: u64 = 8;
+
+ + + + + + + +
const EAUTH_FUNCTION_SIGNATURE_MISMATCH: u64 = 3;
+
+ + + + + + + +
const EDEPRECATED_FUNCTION: u64 = 6;
+
+ + + + + + + +
const EDERIVABLE_AA_NOT_INITIALIZED: u64 = 7;
+
+ + + + + + + +
const EDERIVABLE_ACCOUNT_ABSTRACTION_NOT_ENABLED: u64 = 9;
+
+ + + + + + + +
const EDISPATCHABLE_AUTHENTICATOR_IS_NOT_USED: u64 = 1;
+
+ + + + + + + +
const EFUNCTION_INFO_EXISTENCE: u64 = 2;
+
+ + + + + + + +
const EINCONSISTENT_SIGNER_ADDRESS: u64 = 5;
+
+ + + + + +## Function `using_dispatchable_authenticator` + +Return true if the account is an abstracted account that can be authenticated with dispatchable move authenticator. + + +
#[view]
+public fun using_dispatchable_authenticator(addr: address): bool
+
+ + + +
+Implementation + + +
public fun using_dispatchable_authenticator(addr: address): bool {
+    exists<DispatchableAuthenticator>(resource_addr(addr))
+}
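
Since the authenticator is stored under an address derived from the account (see `resource_addr` further below), this view simply checks for that resource. A minimal sketch, written as if it were a `#[test]` inside `0x1::account_abstraction` (the address is illustrative):

```move
#[test]
fun fresh_address_is_not_abstracted() {
    // No authentication function has been registered for this address,
    // so it is authenticated by its native authentication key only.
    assert!(!using_dispatchable_authenticator(@0xcafe), 0);
}
```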
+
+ + + +
+ + + +## Function `dispatchable_authenticator` + +Return the current dispatchable authenticator move function info. None means this authentication scheme is disabled. + + +
#[view]
+public fun dispatchable_authenticator(addr: address): option::Option<vector<function_info::FunctionInfo>>
+
+ + + +
+Implementation + + +
public fun dispatchable_authenticator(addr: address): Option<vector<FunctionInfo>> acquires DispatchableAuthenticator {
+    let resource_addr = resource_addr(addr);
+    if (exists<DispatchableAuthenticator>(resource_addr)) {
+        option::some(
+            DispatchableAuthenticator[resource_addr].auth_functions.keys()
+        )
+    } else { option::none() }
+}
+
+ + + +
+ + + +## Function `derive_account_address_view` + +Return the account address corresponding to the given abstract_public_key, +for the derivable account abstraction defined by the given function. + + +
#[view]
+public fun derive_account_address_view(module_address: address, module_name: string::String, function_name: string::String, abstract_public_key: vector<u8>): address
+
+ + + +
+Implementation + + +
public fun derive_account_address_view(
+    module_address: address,
+    module_name: String,
+    function_name: String,
+    abstract_public_key: vector<u8>
+): address {
+    derive_account_address(
+        function_info::new_function_info_from_address(module_address, module_name, function_name),
+        &abstract_public_key,
+    )
+}
+
+ + + +
+ + + +## Function `derive_account_address` + +Return the account address corresponding to the given abstract_public_key, +for the derivable account abstraction defined by the given function. +TODO: probably worth creating some module with all these derived functions, +and do computation/caching in rust to avoid recomputation, as we do for objects. + + +
public fun derive_account_address(derivable_func_info: function_info::FunctionInfo, abstract_public_key: &vector<u8>): address
+
+ + + +
+Implementation + + +
public fun derive_account_address(derivable_func_info: FunctionInfo, abstract_public_key: &vector<u8>): address {
+    // using bcs serialized structs here - this allows for no need for separators.
+    // Alternative would've been to create unique string, we would need to convert derivable_func_info into string,
+    // then authentication_key to hex, and then we need separators as well - like ::
+    let bytes = bcs::to_bytes(&derivable_func_info);
+    bytes.append(bcs::to_bytes(abstract_public_key));
+    bytes.push_back(DERIVABLE_ABSTRACTION_DERIVED_SCHEME);
+    from_bcs::to_address(hash::sha3_256(bytes))
+}
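
A minimal sketch of what the derivation provides, written as if it were a `#[test]` inside `0x1::account_abstraction`: for one registered authentication function, each abstract public key (for example, an email-like identity) maps to its own deterministic account address. The module address `@0xabc`, the module and function names, and the identity strings below are hypothetical:

```move
#[test]
fun derived_addresses_differ_per_identity() {
    let func = function_info::new_function_info_from_address(
        @0xabc, string::utf8(b"my_authenticator"), string::utf8(b"authenticate"));
    let alice_key = b"alice@example.com";
    let bob_key = b"bob@example.com";
    // Same function, different identities -> different derived addresses.
    assert!(derive_account_address(func, &alice_key) != derive_account_address(func, &bob_key), 0);
    // The derivation is deterministic for the same inputs.
    assert!(derive_account_address(func, &alice_key) == derive_account_address(func, &alice_key), 1);
}
```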
+
+ + + +
+ + + +## Function `add_authentication_function` + +Add dispatchable authentication function that enables account abstraction via this function. +Note: it is a private entry function that can only be called directly from transaction. + + +
entry fun add_authentication_function(account: &signer, module_address: address, module_name: string::String, function_name: string::String)
+
+ + + +
+Implementation + + +
entry fun add_authentication_function(
+    account: &signer,
+    module_address: address,
+    module_name: String,
+    function_name: String,
+) acquires DispatchableAuthenticator {
+    assert!(features::is_account_abstraction_enabled(), error::invalid_state(EACCOUNT_ABSTRACTION_NOT_ENABLED));
+    assert!(!is_permissioned_signer(account), error::permission_denied(ENOT_MASTER_SIGNER));
+    update_dispatchable_authenticator_impl(
+        account,
+        function_info::new_function_info_from_address(module_address, module_name, function_name),
+        true
+    );
+}
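
The function registered this way is what `authenticate` (shown later in this module) dispatches to: judging from that flow, it receives the account's signer together with the `AbstractionAuthData` and must return a signer for the same address, aborting if authentication fails. A hypothetical sketch of such a module; all names are illustrative and the actual signature verification is elided:

```move
module 0xabc::my_authenticator {
    use aptos_framework::auth_data::AbstractionAuthData;

    /// Hypothetical authentication function. A real implementation must abort unless
    /// `signing_data.authenticator()` is a valid signature over `signing_data.digest()`
    /// for credentials this module trusts for the account.
    public fun authenticate(account: signer, signing_data: AbstractionAuthData): signer {
        let _digest = signing_data.digest();
        let _signature = signing_data.authenticator();
        // ... verify _signature against _digest here; abort on failure ...
        account
    }
}
```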
+
+ + + +
+ + + +## Function `remove_authentication_function` + +Remove dispatchable authentication function that enables account abstraction via this function. +dispatchable function needs to verify that signing_data.authenticator() is a valid signature of signing_data.digest(). +Note: it is a private entry function that can only be called directly from transaction. + + +
entry fun remove_authentication_function(account: &signer, module_address: address, module_name: string::String, function_name: string::String)
+
+ + + +
+Implementation + + +
entry fun remove_authentication_function(
+    account: &signer,
+    module_address: address,
+    module_name: String,
+    function_name: String,
+) acquires DispatchableAuthenticator {
+    assert!(!is_permissioned_signer(account), error::permission_denied(ENOT_MASTER_SIGNER));
+    update_dispatchable_authenticator_impl(
+        account,
+        function_info::new_function_info_from_address(module_address, module_name, function_name),
+        false
+    );
+}
+
+ + + +
+ + + +## Function `remove_authenticator` + +Remove dispatchable authenticator so that all dispatchable authentication functions will be removed as well. +After calling this function, the account is not abstracted at all. +Note: it is a private entry function that can only be called directly from transaction. + + +
entry fun remove_authenticator(account: &signer)
+
+ + + +
+Implementation + + +
entry fun remove_authenticator(
+    account: &signer,
+) acquires DispatchableAuthenticator {
+    assert!(!is_permissioned_signer(account), error::permission_denied(ENOT_MASTER_SIGNER));
+    let addr = signer::address_of(account);
+    let resource_addr = resource_addr(addr);
+    if (exists<DispatchableAuthenticator>(resource_addr)) {
+        move_from<DispatchableAuthenticator>(resource_addr);
+        event::emit(RemoveDispatchableAuthenticator {
+            account: addr,
+        });
+    };
+}
+
+ + + +
+ + + +## Function `register_derivable_authentication_function` + +Add dispatchable derivable authentication function, that enables account abstraction via this function. +This means all accounts within the domain can use it to authenticate, without needing an initialization (unlike non-domain AA). +dispatchable function needs to verify two things: +- that signing_data.derivable_abstract_signature() is a valid signature of signing_data.digest() (just like regular AA) +- that signing_data.derivable_abstract_public_key() is correct identity representing the authenticator +(missing this step would allow impersonation) + +Note: This is public entry function, as it requires framework signer, and that can +only be obtained as a part of the governance script. + + +
public entry fun register_derivable_authentication_function(aptos_framework: &signer, module_address: address, module_name: string::String, function_name: string::String)
+
+ + + +
+Implementation + + +
public entry fun register_derivable_authentication_function(
+    aptos_framework: &signer,
+    module_address: address,
+    module_name: String,
+    function_name: String,
+) acquires DerivableDispatchableAuthenticator {
+    assert!(features::is_derivable_account_abstraction_enabled(), error::invalid_state(EDERIVABLE_ACCOUNT_ABSTRACTION_NOT_ENABLED));
+    system_addresses::assert_aptos_framework(aptos_framework);
+
+    DerivableDispatchableAuthenticator[@aptos_framework].auth_functions.add(
+        function_info::new_function_info_from_address(module_address, module_name, function_name),
+        DerivableRegisterValue::Empty,
+    );
+}
+
+ + + +
+ + + +## Function `initialize` + + + +
public entry fun initialize(aptos_framework: &signer)
+
+ + + +
+Implementation + + +
public entry fun initialize(aptos_framework: &signer) {
+    system_addresses::assert_aptos_framework(aptos_framework);
+    move_to(
+        aptos_framework,
+        DerivableDispatchableAuthenticator::V1 { auth_functions: big_ordered_map::new_with_config(0, 0, false) }
+    );
+}
+
+ + + +
+ + + +## Function `resource_addr` + + + +
fun resource_addr(source: address): address
+
+ + + +
+Implementation + + +
inline fun resource_addr(source: address): address {
+    object::create_user_derived_object_address(source, @aptos_fungible_asset)
+}
+
+ + + +
+ + + +## Function `update_dispatchable_authenticator_impl` + + + +
fun update_dispatchable_authenticator_impl(account: &signer, auth_function: function_info::FunctionInfo, is_add: bool)
+
+ + + +
+Implementation + + +
fun update_dispatchable_authenticator_impl(
+    account: &signer,
+    auth_function: FunctionInfo,
+    is_add: bool,
+) acquires DispatchableAuthenticator {
+    let addr = signer::address_of(account);
+    let resource_addr = resource_addr(addr);
+    let dispatcher_auth_function_info = function_info::new_function_info_from_address(
+        @aptos_framework,
+        string::utf8(b"account_abstraction"),
+        string::utf8(b"dispatchable_authenticate"),
+    );
+    assert!(
+        function_info::check_dispatch_type_compatibility(&dispatcher_auth_function_info, &auth_function),
+        error::invalid_argument(EAUTH_FUNCTION_SIGNATURE_MISMATCH)
+    );
+    if (is_add) {
+        if (!exists<DispatchableAuthenticator>(resource_addr)) {
+            move_to(
+                &create_signer::create_signer(resource_addr),
+                DispatchableAuthenticator::V1 { auth_functions: ordered_map::new() }
+            );
+        };
+        let current_map = &mut borrow_global_mut<DispatchableAuthenticator>(resource_addr).auth_functions;
+        assert!(
+            !current_map.contains(&auth_function),
+            error::already_exists(EFUNCTION_INFO_EXISTENCE)
+        );
+        current_map.add(auth_function, true);
+        event::emit(
+            UpdateDispatchableAuthenticator {
+                account: addr,
+                update: b"add",
+                auth_function,
+            }
+        );
+    } else {
+        assert!(exists<DispatchableAuthenticator>(resource_addr), error::not_found(EFUNCTION_INFO_EXISTENCE));
+        let current_map = &mut borrow_global_mut<DispatchableAuthenticator>(resource_addr).auth_functions;
+        assert!(
+            current_map.contains(&auth_function),
+            error::not_found(EFUNCTION_INFO_EXISTENCE)
+        );
+        current_map.remove(&auth_function);
+        event::emit(
+            UpdateDispatchableAuthenticator {
+                account: addr,
+                update: b"remove",
+                auth_function,
+            }
+        );
+        if (current_map.length() == 0) {
+            remove_authenticator(account);
+        }
+    };
+}
+
+ + + +
+ + + +## Function `dispatchable_authenticator_internal` + + + +
fun dispatchable_authenticator_internal(addr: address): &ordered_map::OrderedMap<function_info::FunctionInfo, bool>
+
+ + + +
+Implementation + + +
inline fun dispatchable_authenticator_internal(addr: address): &OrderedMap<FunctionInfo, bool> {
+    assert!(using_dispatchable_authenticator(addr), error::not_found(EDISPATCHABLE_AUTHENTICATOR_IS_NOT_USED));
+    &DispatchableAuthenticator[resource_addr(addr)].auth_functions
+}
+
+ + + +
+ + + +## Function `dispatchable_derivable_authenticator_internal` + + + +
fun dispatchable_derivable_authenticator_internal(): &big_ordered_map::BigOrderedMap<function_info::FunctionInfo, account_abstraction::DerivableRegisterValue>
+
+ + + +
+Implementation + + +
inline fun dispatchable_derivable_authenticator_internal(): &BigOrderedMap<FunctionInfo, DerivableRegisterValue> {
+    assert!(exists<DerivableDispatchableAuthenticator>(@aptos_framework), error::not_found(EDERIVABLE_AA_NOT_INITIALIZED));
+    &DerivableDispatchableAuthenticator[@aptos_framework].auth_functions
+}
+
+ + + +
+ + + +## Function `authenticate` + + + +
fun authenticate(account: signer, func_info: function_info::FunctionInfo, signing_data: auth_data::AbstractionAuthData): signer
+
+ + + +
+Implementation + + +
fun authenticate(
+    account: signer,
+    func_info: FunctionInfo,
+    signing_data: AbstractionAuthData,
+): signer acquires DispatchableAuthenticator, DerivableDispatchableAuthenticator {
+    let master_signer_addr = signer::address_of(&account);
+
+    if (signing_data.is_derivable()) {
+        assert!(features::is_derivable_account_abstraction_enabled(), error::invalid_state(EDERIVABLE_ACCOUNT_ABSTRACTION_NOT_ENABLED));
+        assert!(master_signer_addr == derive_account_address(func_info, signing_data.derivable_abstract_public_key()), error::invalid_state(EINCONSISTENT_SIGNER_ADDRESS));
+
+        let func_infos = dispatchable_derivable_authenticator_internal();
+        assert!(func_infos.contains(&func_info), error::not_found(EFUNCTION_INFO_EXISTENCE));
+    } else {
+        assert!(features::is_account_abstraction_enabled(), error::invalid_state(EACCOUNT_ABSTRACTION_NOT_ENABLED));
+
+        let func_infos = dispatchable_authenticator_internal(master_signer_addr);
+        assert!(func_infos.contains(&func_info), error::not_found(EFUNCTION_INFO_EXISTENCE));
+    };
+
+    function_info::load_module_from_function(&func_info);
+    let returned_signer = dispatchable_authenticate(account, signing_data, &func_info);
+    // Returned signer MUST represent the same account address. Otherwise, it may break the invariant of Aptos blockchain!
+    assert!(
+        master_signer_addr == signer::address_of(&returned_signer),
+        error::invalid_state(EINCONSISTENT_SIGNER_ADDRESS)
+    );
+    returned_signer
+}
+
+ + + +
+ + + +## Function `dispatchable_authenticate` + +The native function to dispatch customized move authentication function. + + +
fun dispatchable_authenticate(account: signer, signing_data: auth_data::AbstractionAuthData, function: &function_info::FunctionInfo): signer
+
+ + + +
+Implementation + + +
native fun dispatchable_authenticate(
+    account: signer,
+    signing_data: AbstractionAuthData,
+    function: &FunctionInfo
+): signer;
+
+ + + +
+ + + +## Function `add_dispatchable_authentication_function` + + + +
#[deprecated]
+public entry fun add_dispatchable_authentication_function(_account: &signer, _module_address: address, _module_name: string::String, _function_name: string::String)
+
+ + + +
+Implementation + + +
public entry fun add_dispatchable_authentication_function(
+    _account: &signer,
+    _module_address: address,
+    _module_name: String,
+    _function_name: String,
+) {
+    abort std::error::unavailable(EDEPRECATED_FUNCTION)
+}
+
+ + + +
+ + + +## Function `remove_dispatchable_authentication_function` + + + +
#[deprecated]
+public entry fun remove_dispatchable_authentication_function(_account: &signer, _module_address: address, _module_name: string::String, _function_name: string::String)
+
+ + + +
+Implementation + + +
public entry fun remove_dispatchable_authentication_function(
+    _account: &signer,
+    _module_address: address,
+    _module_name: String,
+    _function_name: String,
+) {
+    abort std::error::unavailable(EDEPRECATED_FUNCTION)
+}
+
+ + + +
+ + + +## Function `remove_dispatchable_authenticator` + + + +
#[deprecated]
+public entry fun remove_dispatchable_authenticator(_account: &signer)
+
+ + + +
+Implementation + + +
public entry fun remove_dispatchable_authenticator(
+    _account: &signer,
+) {
+    abort std::error::unavailable(EDEPRECATED_FUNCTION)
+}
+
+ + + +
+ + + +## Specification + + + +
pragma verify = false;
+
+ + + + + + + +
fun spec_dispatchable_authenticate(
+   account: signer,
+   signing_data: AbstractionAuthData,
+   function: &FunctionInfo
+): signer;
+
+ + + + + +### Function `dispatchable_authenticate` + + +
fun dispatchable_authenticate(account: signer, signing_data: auth_data::AbstractionAuthData, function: &function_info::FunctionInfo): signer
+
+ + + + +
pragma opaque;
+ensures [abstract] result == spec_dispatchable_authenticate(account, signing_data, function);
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/aptos-framework/doc/aggregator.md b/aptos-move/framework/aptos-framework/doc/aggregator.md index 4d2bfca013199..be20daf6b83c1 100644 --- a/aptos-move/framework/aptos-framework/doc/aggregator.md +++ b/aptos-move/framework/aptos-framework/doc/aggregator.md @@ -330,10 +330,9 @@ Destroys an aggregator and removes it from its AggregatorFactory. -
pragma opaque;
+
pragma intrinsic;
 // This enforces high-level requirement 1:
-aborts_if false;
-ensures [abstract] result == spec_get_limit(aggregator);
+aborts_if [abstract] false;
 
diff --git a/aptos-move/framework/aptos-framework/doc/aggregator_factory.md b/aptos-move/framework/aptos-framework/doc/aggregator_factory.md index e29646e2d4b09..4e53e07bd5b20 100644 --- a/aptos-move/framework/aptos-framework/doc/aggregator_factory.md +++ b/aptos-move/framework/aptos-framework/doc/aggregator_factory.md @@ -68,6 +68,15 @@ account can. ## Constants + + + + +
const MAX_U128: u128 = 340282366920938463463374607431768211455;
+
+ + + Aggregator factory is not published yet. @@ -78,6 +87,16 @@ Aggregator factory is not published yet. + + +Aggregator V1 only supports limit == MAX_U128. + + +
const EAGG_V1_LIMIT_DEPRECATED: u64 = 2;
+
+ + + ## Function `initialize_aggregator_factory` @@ -114,7 +133,7 @@ Creates a new factory for aggregators. Can only be called during genesis. Creates a new aggregator instance which overflows on exceeding a limit. -
public(friend) fun create_aggregator_internal(limit: u128): aggregator::Aggregator
+
public(friend) fun create_aggregator_internal(): aggregator::Aggregator
 
@@ -123,14 +142,14 @@ Creates a new aggregator instance which overflows on exceeding a limitImplementation -
public(friend) fun create_aggregator_internal(limit: u128): Aggregator acquires AggregatorFactory {
+
public(friend) fun create_aggregator_internal(): Aggregator acquires AggregatorFactory {
     assert!(
         exists<AggregatorFactory>(@aptos_framework),
         error::not_found(EAGGREGATOR_FACTORY_NOT_FOUND)
     );
 
     let aggregator_factory = borrow_global_mut<AggregatorFactory>(@aptos_framework);
-    new_aggregator(aggregator_factory, limit)
+    new_aggregator(aggregator_factory, MAX_U128)
 }
 
@@ -146,7 +165,8 @@ This is currently a function closed for public. This can be updated in the futur to allow any signer to call. -
public fun create_aggregator(account: &signer, limit: u128): aggregator::Aggregator
+
#[deprecated]
+public fun create_aggregator(account: &signer, limit: u128): aggregator::Aggregator
 
@@ -156,9 +176,15 @@ to allow any signer to call.
public fun create_aggregator(account: &signer, limit: u128): Aggregator acquires AggregatorFactory {
+    // deprecated. Currently used only in aptos-move/e2e-move-tests/src/tests/aggregator.data/pack/sources/aggregator_test.move
+
     // Only Aptos Framework (0x1) account can call this for now.
     system_addresses::assert_aptos_framework(account);
-    create_aggregator_internal(limit)
+    assert!(
+        limit == MAX_U128,
+        error::invalid_argument(EAGG_V1_LIMIT_DEPRECATED)
+    );
+    create_aggregator_internal()
 }
 
@@ -279,7 +305,7 @@ AggregatorFactory is not under the caller before creating the resource. ### Function `create_aggregator_internal` -
public(friend) fun create_aggregator_internal(limit: u128): aggregator::Aggregator
+
public(friend) fun create_aggregator_internal(): aggregator::Aggregator
 
@@ -287,7 +313,7 @@ AggregatorFactory is not under the caller before creating the resource.
// This enforces high-level requirement 2:
 include CreateAggregatorInternalAbortsIf;
-ensures aggregator::spec_get_limit(result) == limit;
+ensures aggregator::spec_get_limit(result) == MAX_U128;
 ensures aggregator::spec_aggregator_get_val(result) == 0;
 
@@ -309,7 +335,8 @@ AggregatorFactory is not under the caller before creating the resource. ### Function `create_aggregator` -
public fun create_aggregator(account: &signer, limit: u128): aggregator::Aggregator
+
#[deprecated]
+public fun create_aggregator(account: &signer, limit: u128): aggregator::Aggregator
 
@@ -320,6 +347,7 @@ AggregatorFactory existed under the @aptos_framework when Creating a new aggrega
let addr = signer::address_of(account);
 // This enforces high-level requirement 3:
 aborts_if addr != @aptos_framework;
+aborts_if limit != MAX_U128;
 aborts_if !exists<AggregatorFactory>(@aptos_framework);
 
diff --git a/aptos-move/framework/aptos-framework/doc/aggregator_v2.md b/aptos-move/framework/aptos-framework/doc/aggregator_v2.md index 842ad22ecdab3..de7cb1e7cc7d9 100644 --- a/aptos-move/framework/aptos-framework/doc/aggregator_v2.md +++ b/aptos-move/framework/aptos-framework/doc/aggregator_v2.md @@ -50,14 +50,22 @@ read, read_snapshot, read_derived_string - [Function `copy_snapshot`](#0x1_aggregator_v2_copy_snapshot) - [Function `string_concat`](#0x1_aggregator_v2_string_concat) - [Specification](#@Specification_1) + - [Struct `Aggregator`](#@Specification_1_Aggregator) + - [Function `max_value`](#@Specification_1_max_value) - [Function `create_aggregator`](#@Specification_1_create_aggregator) - [Function `create_unbounded_aggregator`](#@Specification_1_create_unbounded_aggregator) - [Function `try_add`](#@Specification_1_try_add) + - [Function `add`](#@Specification_1_add) - [Function `try_sub`](#@Specification_1_try_sub) + - [Function `sub`](#@Specification_1_sub) - [Function `is_at_least_impl`](#@Specification_1_is_at_least_impl) - [Function `read`](#@Specification_1_read) - [Function `snapshot`](#@Specification_1_snapshot) - [Function `create_snapshot`](#@Specification_1_create_snapshot) + - [Function `read_snapshot`](#@Specification_1_read_snapshot) + - [Function `read_derived_string`](#@Specification_1_read_derived_string) + - [Function `create_derived_string`](#@Specification_1_create_derived_string) + - [Function `derive_string_concat`](#@Specification_1_derive_string_concat) - [Function `copy_snapshot`](#@Specification_1_copy_snapshot) - [Function `string_concat`](#@Specification_1_string_concat) @@ -217,7 +225,7 @@ and any calls will raise this error. -Arguments passed to concat exceed max limit of 256 bytes (for prefix and suffix together). +Arguments passed to concat exceed max limit of 1024 bytes (for prefix and suffix together).
const ECONCAT_STRING_LENGTH_TOO_LARGE: u64 = 8;
@@ -252,7 +260,7 @@ The generic type supplied to the aggregator is not supported.
 Returns max_value exceeding which aggregator overflows.
 
 
-
public fun max_value<IntElement: copy, drop>(aggregator: &aggregator_v2::Aggregator<IntElement>): IntElement
+
public fun max_value<IntElement: copy, drop>(self: &aggregator_v2::Aggregator<IntElement>): IntElement
 
@@ -261,8 +269,8 @@ Returns max_value exceeding which aggregator overflows. Implementation -
public fun max_value<IntElement: copy + drop>(aggregator: &Aggregator<IntElement>): IntElement {
-    aggregator.max_value
+
public fun max_value<IntElement: copy + drop>(self: &Aggregator<IntElement>): IntElement {
+    self.max_value
 }
 
@@ -313,7 +321,7 @@ EAGGREGATOR_ELEMENT_TYPE_NOT_SUPPORTED raised if called with a different type.
public fun create_aggregator_with_value<IntElement: copy + drop>(start_value: IntElement, max_value: IntElement): Aggregator<IntElement> {
     let aggregator = create_aggregator(max_value);
-    add(&mut aggregator, start_value);
+    aggregator.add(start_value);
     aggregator
 }
 
@@ -366,7 +374,7 @@ EAGGREGATOR_ELEMENT_TYPE_NOT_SUPPORTED raised if called with a different type.
public fun create_unbounded_aggregator_with_value<IntElement: copy + drop>(start_value: IntElement): Aggregator<IntElement> {
     let aggregator = create_unbounded_aggregator();
-    add(&mut aggregator, start_value);
+    aggregator.add(start_value);
     aggregator
 }
 
@@ -385,7 +393,7 @@ If addition would exceed the max_value, false is returned, a Parallelism info: This operation enables speculative parallelism. -
public fun try_add<IntElement>(aggregator: &mut aggregator_v2::Aggregator<IntElement>, value: IntElement): bool
+
public fun try_add<IntElement>(self: &mut aggregator_v2::Aggregator<IntElement>, value: IntElement): bool
 
@@ -394,7 +402,7 @@ Parallelism info: This operation enables speculative parallelism. Implementation -
public native fun try_add<IntElement>(aggregator: &mut Aggregator<IntElement>, value: IntElement): bool;
+
public native fun try_add<IntElement>(self: &mut Aggregator<IntElement>, value: IntElement): bool;
 
@@ -411,7 +419,7 @@ If addition would exceed the max_value, EAGGREGATOR_OVERFLOW exception will be t Parallelism info: This operation enables speculative parallelism. -
public fun add<IntElement>(aggregator: &mut aggregator_v2::Aggregator<IntElement>, value: IntElement)
+
public fun add<IntElement>(self: &mut aggregator_v2::Aggregator<IntElement>, value: IntElement)
 
@@ -420,8 +428,8 @@ Parallelism info: This operation enables speculative parallelism. Implementation -
public fun add<IntElement>(aggregator: &mut Aggregator<IntElement>, value: IntElement) {
-    assert!(try_add(aggregator, value), error::out_of_range(EAGGREGATOR_OVERFLOW));
+
public fun add<IntElement>(self: &mut Aggregator<IntElement>, value: IntElement) {
+    assert!(self.try_add(value), error::out_of_range(EAGGREGATOR_OVERFLOW));
 }
 
@@ -439,7 +447,7 @@ If subtraction would result in a negative value, false is re Parallelism info: This operation enables speculative parallelism. -
public fun try_sub<IntElement>(aggregator: &mut aggregator_v2::Aggregator<IntElement>, value: IntElement): bool
+
public fun try_sub<IntElement>(self: &mut aggregator_v2::Aggregator<IntElement>, value: IntElement): bool
 
@@ -448,7 +456,7 @@ Parallelism info: This operation enables speculative parallelism. Implementation -
public native fun try_sub<IntElement>(aggregator: &mut Aggregator<IntElement>, value: IntElement): bool;
+
public native fun try_sub<IntElement>(self: &mut Aggregator<IntElement>, value: IntElement): bool;
 
@@ -463,7 +471,7 @@ Parallelism info: This operation enables speculative parallelism. Parallelism info: This operation enables speculative parallelism. -
public fun sub<IntElement>(aggregator: &mut aggregator_v2::Aggregator<IntElement>, value: IntElement)
+
public fun sub<IntElement>(self: &mut aggregator_v2::Aggregator<IntElement>, value: IntElement)
 
@@ -472,8 +480,8 @@ Parallelism info: This operation enables speculative parallelism. Implementation -
public fun sub<IntElement>(aggregator: &mut Aggregator<IntElement>, value: IntElement) {
-    assert!(try_sub(aggregator, value), error::out_of_range(EAGGREGATOR_UNDERFLOW));
+
public fun sub<IntElement>(self: &mut Aggregator<IntElement>, value: IntElement) {
+    assert!(self.try_sub(value), error::out_of_range(EAGGREGATOR_UNDERFLOW));
 }
 
@@ -487,7 +495,7 @@ Parallelism info: This operation enables speculative parallelism. -
fun is_at_least_impl<IntElement>(aggregator: &aggregator_v2::Aggregator<IntElement>, min_amount: IntElement): bool
+
fun is_at_least_impl<IntElement>(self: &aggregator_v2::Aggregator<IntElement>, min_amount: IntElement): bool
 
@@ -496,7 +504,7 @@ Parallelism info: This operation enables speculative parallelism. Implementation -
native fun is_at_least_impl<IntElement>(aggregator: &Aggregator<IntElement>, min_amount: IntElement): bool;
+
native fun is_at_least_impl<IntElement>(self: &Aggregator<IntElement>, min_amount: IntElement): bool;
 
@@ -517,7 +525,7 @@ Until traits are deployed, is_at_most/is_equal utility Parallelism info: This operation enables speculative parallelism. -
public fun is_at_least<IntElement>(aggregator: &aggregator_v2::Aggregator<IntElement>, min_amount: IntElement): bool
+
public fun is_at_least<IntElement>(self: &aggregator_v2::Aggregator<IntElement>, min_amount: IntElement): bool
 
@@ -526,9 +534,9 @@ Parallelism info: This operation enables speculative parallelism. Implementation -
public fun is_at_least<IntElement>(aggregator: &Aggregator<IntElement>, min_amount: IntElement): bool {
+
public fun is_at_least<IntElement>(self: &Aggregator<IntElement>, min_amount: IntElement): bool {
     assert!(features::aggregator_v2_is_at_least_api_enabled(), EAGGREGATOR_API_V2_NOT_ENABLED);
-    is_at_least_impl(aggregator, min_amount)
+    self.is_at_least_impl(min_amount)
 }
 
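Most of the churn in the hunks above is the move to receiver-style calls (aggregator.add(value) instead of add(&mut aggregator, value)). A minimal usage sketch, assuming compiler-v2 receiver syntax and a hypothetical 0xcafe::parallel_counter module (not part of this diff):

module 0xcafe::parallel_counter {
    use aptos_framework::aggregator_v2::{Self, Aggregator};

    struct Counter has key {
        // Unbounded aggregator: the implicit max_value is the full u64 range.
        value: Aggregator<u64>,
    }

    public entry fun init(account: &signer) {
        move_to(account, Counter { value: aggregator_v2::create_unbounded_aggregator<u64>() });
    }

    public entry fun bump(addr: address, amount: u64) acquires Counter {
        let counter = borrow_global_mut<Counter>(addr);
        // Receiver-style call matching the new `self` parameter; aborts with
        // EAGGREGATOR_OVERFLOW only if the u64 range would be exceeded.
        counter.value.add(amount);
    }

    public fun reached(addr: address, target: u64): bool acquires Counter {
        let counter = borrow_global<Counter>(addr);
        // is_at_least() preserves speculative parallelism, unlike read().
        counter.value.is_at_least(target)
    }
}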
@@ -552,7 +560,7 @@ up to two times slower. Parallelism info: This operation *prevents* speculative parallelism. -
public fun read<IntElement>(aggregator: &aggregator_v2::Aggregator<IntElement>): IntElement
+
public fun read<IntElement>(self: &aggregator_v2::Aggregator<IntElement>): IntElement
 
@@ -561,7 +569,7 @@ Parallelism info: This operation *prevents* speculative parallelism. Implementation -
public native fun read<IntElement>(aggregator: &Aggregator<IntElement>): IntElement;
+
public native fun read<IntElement>(self: &Aggregator<IntElement>): IntElement;
 
@@ -578,7 +586,7 @@ Unlike read(), it is fast and avoids sequential dependencies. Parallelism info: This operation enables parallelism. -
public fun snapshot<IntElement>(aggregator: &aggregator_v2::Aggregator<IntElement>): aggregator_v2::AggregatorSnapshot<IntElement>
+
public fun snapshot<IntElement>(self: &aggregator_v2::Aggregator<IntElement>): aggregator_v2::AggregatorSnapshot<IntElement>
 
@@ -587,7 +595,7 @@ Parallelism info: This operation enables parallelism. Implementation -
public native fun snapshot<IntElement>(aggregator: &Aggregator<IntElement>): AggregatorSnapshot<IntElement>;
+
public native fun snapshot<IntElement>(self: &Aggregator<IntElement>): AggregatorSnapshot<IntElement>;
 
@@ -630,7 +638,7 @@ or has other read/write conflicts) Parallelism info: This operation *prevents* speculative parallelism. -
public fun read_snapshot<IntElement>(snapshot: &aggregator_v2::AggregatorSnapshot<IntElement>): IntElement
+
public fun read_snapshot<IntElement>(self: &aggregator_v2::AggregatorSnapshot<IntElement>): IntElement
 
@@ -639,7 +647,7 @@ Parallelism info: This operation *prevents* speculative parallelism. Implementation -
public native fun read_snapshot<IntElement>(snapshot: &AggregatorSnapshot<IntElement>): IntElement;
+
public native fun read_snapshot<IntElement>(self: &AggregatorSnapshot<IntElement>): IntElement;
 
@@ -658,7 +666,7 @@ or has other read/write conflicts) Parallelism info: This operation *prevents* speculative parallelism. -
public fun read_derived_string(snapshot: &aggregator_v2::DerivedStringSnapshot): string::String
+
public fun read_derived_string(self: &aggregator_v2::DerivedStringSnapshot): string::String
 
@@ -667,7 +675,7 @@ Parallelism info: This operation *prevents* speculative parallelism. Implementation -
public native fun read_derived_string(snapshot: &DerivedStringSnapshot): String;
+
public native fun read_derived_string(self: &DerivedStringSnapshot): String;
 
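Since string_concat() is deprecated in favour of derive_string_concat(), a sketch of the snapshot-then-derive pattern may help; the module below is illustrative only, and the 1024-byte prefix/suffix limit noted above still applies:

module 0xcafe::named_mints {
    use std::string;
    use aptos_framework::aggregator_v2::{Self, Aggregator, DerivedStringSnapshot};

    struct MintCounter has key {
        total: Aggregator<u64>,
    }

    // Produces a lazily derived name such as "token #7" without calling read(),
    // so concurrent mints keep their speculative parallelism.
    fun next_name(addr: address): DerivedStringSnapshot acquires MintCounter {
        let counter = borrow_global_mut<MintCounter>(addr);
        counter.total.add(1);
        let snap = counter.total.snapshot();
        aggregator_v2::derive_string_concat(string::utf8(b"token #"), &snap, string::utf8(b""))
    }
}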
@@ -705,7 +713,7 @@ Useful for when object is sometimes created via string_concat(), and sometimes d Concatenates before, snapshot and after into a single string. snapshot passed needs to have integer type - currently supported types are u64 and u128. Raises EUNSUPPORTED_AGGREGATOR_SNAPSHOT_TYPE if called with another type. -If length of prefix and suffix together exceed 256 bytes, ECONCAT_STRING_LENGTH_TOO_LARGE is raised. +If length of prefix and suffix together exceeds 1024 bytes, ECONCAT_STRING_LENGTH_TOO_LARGE is raised. Parallelism info: This operation enables parallelism. @@ -779,6 +787,54 @@ DEPRECATED, use derive_string_concat() instead. always raises EAGGREGATOR_FUNCTI ## Specification + + +### Struct `Aggregator` + + +
struct Aggregator<IntElement> has drop, store
+
+ + + +
+
+value: IntElement +
+
+ +
+
+max_value: IntElement +
+
+ +
+
+ + + +
pragma intrinsic;
+
+ + + + + +### Function `max_value` + + +
public fun max_value<IntElement: copy, drop>(self: &aggregator_v2::Aggregator<IntElement>): IntElement
+
+ + + + +
pragma intrinsic;
+
+ + + ### Function `create_aggregator` @@ -790,7 +846,7 @@ DEPRECATED, use derive_string_concat() instead. always raises EAGGREGATOR_FUNCTI -
pragma opaque;
+
pragma intrinsic;
 
@@ -806,7 +862,7 @@ DEPRECATED, use derive_string_concat() instead. always raises EAGGREGATOR_FUNCTI -
pragma opaque;
+
pragma intrinsic;
 
@@ -816,13 +872,29 @@ DEPRECATED, use derive_string_concat() instead. always raises EAGGREGATOR_FUNCTI ### Function `try_add` -
public fun try_add<IntElement>(aggregator: &mut aggregator_v2::Aggregator<IntElement>, value: IntElement): bool
+
public fun try_add<IntElement>(self: &mut aggregator_v2::Aggregator<IntElement>, value: IntElement): bool
 
-
pragma opaque;
+
pragma intrinsic;
+
+ + + + + +### Function `add` + + +
public fun add<IntElement>(self: &mut aggregator_v2::Aggregator<IntElement>, value: IntElement)
+
+ + + + +
pragma intrinsic;
 
@@ -832,13 +904,29 @@ DEPRECATED, use derive_string_concat() instead. always raises EAGGREGATOR_FUNCTI ### Function `try_sub` -
public fun try_sub<IntElement>(aggregator: &mut aggregator_v2::Aggregator<IntElement>, value: IntElement): bool
+
public fun try_sub<IntElement>(self: &mut aggregator_v2::Aggregator<IntElement>, value: IntElement): bool
 
-
pragma opaque;
+
pragma intrinsic;
+
+ + + + + +### Function `sub` + + +
public fun sub<IntElement>(self: &mut aggregator_v2::Aggregator<IntElement>, value: IntElement)
+
+ + + + +
pragma intrinsic;
 
@@ -848,13 +936,13 @@ DEPRECATED, use derive_string_concat() instead. always raises EAGGREGATOR_FUNCTI ### Function `is_at_least_impl` -
fun is_at_least_impl<IntElement>(aggregator: &aggregator_v2::Aggregator<IntElement>, min_amount: IntElement): bool
+
fun is_at_least_impl<IntElement>(self: &aggregator_v2::Aggregator<IntElement>, min_amount: IntElement): bool
 
-
pragma opaque;
+
pragma intrinsic;
 
@@ -864,13 +952,13 @@ DEPRECATED, use derive_string_concat() instead. always raises EAGGREGATOR_FUNCTI ### Function `read` -
public fun read<IntElement>(aggregator: &aggregator_v2::Aggregator<IntElement>): IntElement
+
public fun read<IntElement>(self: &aggregator_v2::Aggregator<IntElement>): IntElement
 
-
pragma opaque;
+
pragma intrinsic;
 
@@ -880,13 +968,15 @@ DEPRECATED, use derive_string_concat() instead. always raises EAGGREGATOR_FUNCTI ### Function `snapshot` -
public fun snapshot<IntElement>(aggregator: &aggregator_v2::Aggregator<IntElement>): aggregator_v2::AggregatorSnapshot<IntElement>
+
public fun snapshot<IntElement>(self: &aggregator_v2::Aggregator<IntElement>): aggregator_v2::AggregatorSnapshot<IntElement>
 
pragma opaque;
+include AbortsIfIntElement<IntElement>;
+ensures [abstract] result.value == spec_get_value(self);
 
@@ -903,6 +993,92 @@ DEPRECATED, use derive_string_concat() instead. always raises EAGGREGATOR_FUNCTI
pragma opaque;
+include AbortsIfIntElement<IntElement>;
+ensures [abstract] result.value == value;
+
+ + + + + +### Function `read_snapshot` + + +
public fun read_snapshot<IntElement>(self: &aggregator_v2::AggregatorSnapshot<IntElement>): IntElement
+
+ + + + +
pragma opaque;
+include AbortsIfIntElement<IntElement>;
+ensures [abstract] result == self.value;
+
+ + + + + +### Function `read_derived_string` + + +
public fun read_derived_string(self: &aggregator_v2::DerivedStringSnapshot): string::String
+
+ + + + +
pragma opaque;
+aborts_if [abstract] false;
+ensures [abstract] result == self.value;
+
+ + + + + +### Function `create_derived_string` + + +
public fun create_derived_string(value: string::String): aggregator_v2::DerivedStringSnapshot
+
+ + + + +
pragma opaque;
+aborts_if [abstract] len(value.bytes) > 1024;
+ensures [abstract] result.value == value;
+
+ + + + + +### Function `derive_string_concat` + + +
public fun derive_string_concat<IntElement>(before: string::String, snapshot: &aggregator_v2::AggregatorSnapshot<IntElement>, after: string::String): aggregator_v2::DerivedStringSnapshot
+
+ + + + +
pragma opaque;
+include AbortsIfIntElement<IntElement>;
+ensures [abstract] result.value.bytes == concat(before.bytes, concat(spec_get_string_value(snapshot).bytes, after.bytes));
+aborts_if [abstract] len(before.bytes) + len(after.bytes) > 1024;
+
+ + + + + + + +
schema AbortsIfIntElement<IntElement> {
+    aborts_if [abstract] type_info::type_name<IntElement>().bytes != b"u64" && type_info::type_name<IntElement>().bytes != b"u128";
+}
 
@@ -920,6 +1096,7 @@ DEPRECATED, use derive_string_concat() instead. always raises EAGGREGATOR_FUNCTI
pragma opaque;
+aborts_if [abstract] true;
 
@@ -937,6 +1114,56 @@ DEPRECATED, use derive_string_concat() instead. always raises EAGGREGATOR_FUNCTI
pragma opaque;
+aborts_if [abstract] true;
+
+ + + + + + + +
native fun spec_get_value<IntElement>(aggregator: Aggregator<IntElement>): IntElement;
+
+ + + + + + + +
native fun spec_get_max_value<IntElement>(aggregator: Aggregator<IntElement>): IntElement;
+
+ + + + + + + +
fun spec_get_string_value<IntElement>(aggregator: AggregatorSnapshot<IntElement>): String;
+
+ + + + + + + +
fun spec_read_snapshot<IntElement>(snapshot: AggregatorSnapshot<IntElement>): IntElement {
+   snapshot.value
+}
+
+ + + + + + + +
fun spec_read_derived_string(snapshot: DerivedStringSnapshot): String {
+   snapshot.value
+}
 
diff --git a/aptos-move/framework/aptos-framework/doc/aptos_account.md b/aptos-move/framework/aptos-framework/doc/aptos_account.md index 7ce465a7d176a..bcfd8106cfa8c 100644 --- a/aptos-move/framework/aptos-framework/doc/aptos_account.md +++ b/aptos-move/framework/aptos-framework/doc/aptos_account.md @@ -15,6 +15,9 @@ - [Function `batch_transfer_coins`](#0x1_aptos_account_batch_transfer_coins) - [Function `transfer_coins`](#0x1_aptos_account_transfer_coins) - [Function `deposit_coins`](#0x1_aptos_account_deposit_coins) +- [Function `batch_transfer_fungible_assets`](#0x1_aptos_account_batch_transfer_fungible_assets) +- [Function `transfer_fungible_assets`](#0x1_aptos_account_transfer_fungible_assets) +- [Function `deposit_fungible_assets`](#0x1_aptos_account_deposit_fungible_assets) - [Function `assert_account_exists`](#0x1_aptos_account_assert_account_exists) - [Function `assert_account_is_registered_for_apt`](#0x1_aptos_account_assert_account_is_registered_for_apt) - [Function `set_allow_direct_coin_transfers`](#0x1_aptos_account_set_allow_direct_coin_transfers) @@ -22,7 +25,7 @@ - [Function `register_apt`](#0x1_aptos_account_register_apt) - [Function `fungible_transfer_only`](#0x1_aptos_account_fungible_transfer_only) - [Function `is_fungible_balance_at_least`](#0x1_aptos_account_is_fungible_balance_at_least) -- [Function `burn_from_fungible_store`](#0x1_aptos_account_burn_from_fungible_store) +- [Function `burn_from_fungible_store_for_gas`](#0x1_aptos_account_burn_from_fungible_store_for_gas) - [Function `ensure_primary_fungible_store_exists`](#0x1_aptos_account_ensure_primary_fungible_store_exists) - [Function `primary_fungible_store_address`](#0x1_aptos_account_primary_fungible_store_address) - [Specification](#@Specification_1) @@ -34,6 +37,9 @@ - [Function `batch_transfer_coins`](#@Specification_1_batch_transfer_coins) - [Function `transfer_coins`](#@Specification_1_transfer_coins) - [Function `deposit_coins`](#@Specification_1_deposit_coins) + - [Function `batch_transfer_fungible_assets`](#@Specification_1_batch_transfer_fungible_assets) + - [Function `transfer_fungible_assets`](#@Specification_1_transfer_fungible_assets) + - [Function `deposit_fungible_assets`](#@Specification_1_deposit_fungible_assets) - [Function `assert_account_exists`](#@Specification_1_assert_account_exists) - [Function `assert_account_is_registered_for_apt`](#@Specification_1_assert_account_is_registered_for_apt) - [Function `set_allow_direct_coin_transfers`](#@Specification_1_set_allow_direct_coin_transfers) @@ -41,7 +47,7 @@ - [Function `register_apt`](#@Specification_1_register_apt) - [Function `fungible_transfer_only`](#@Specification_1_fungible_transfer_only) - [Function `is_fungible_balance_at_least`](#@Specification_1_is_fungible_balance_at_least) - - [Function `burn_from_fungible_store`](#@Specification_1_burn_from_fungible_store) + - [Function `burn_from_fungible_store_for_gas`](#@Specification_1_burn_from_fungible_store_for_gas)
use 0x1::account;
@@ -393,9 +399,10 @@ This would create the recipient account first and register it to receive the Coi
     if (!account::exists_at(to)) {
         create_account(to);
         spec {
-            assert coin::spec_is_account_registered<AptosCoin>(to);
-            assume aptos_std::type_info::type_of<CoinType>() == aptos_std::type_info::type_of<AptosCoin>() ==>
-                coin::spec_is_account_registered<CoinType>(to);
+            // TODO(fa_migration)
+            // assert coin::spec_is_account_registered<AptosCoin>(to);
+            // assume aptos_std::type_info::type_of<CoinType>() == aptos_std::type_info::type_of<AptosCoin>() ==>
+            //     coin::spec_is_account_registered<CoinType>(to);
         };
     };
     if (!coin::is_account_registered<CoinType>(to)) {
@@ -411,6 +418,100 @@ This would create the recipient account first and register it to receive the Coi
 
 
 
+
+
+
+
+## Function `batch_transfer_fungible_assets`
+
+Batch version of transfer_fungible_assets.
+
+
+
public entry fun batch_transfer_fungible_assets(from: &signer, metadata: object::Object<fungible_asset::Metadata>, recipients: vector<address>, amounts: vector<u64>)
+
+ + + +
+Implementation + + +
public entry fun batch_transfer_fungible_assets(
+    from: &signer,
+    metadata: Object<Metadata>,
+    recipients: vector<address>,
+    amounts: vector<u64>
+) {
+    let recipients_len = vector::length(&recipients);
+    assert!(
+        recipients_len == vector::length(&amounts),
+        error::invalid_argument(EMISMATCHING_RECIPIENTS_AND_AMOUNTS_LENGTH),
+    );
+
+    vector::enumerate_ref(&recipients, |i, to| {
+        let amount = *vector::borrow(&amounts, i);
+        transfer_fungible_assets(from, metadata, *to, amount);
+    });
+}
+
+ + + +
+ + + +## Function `transfer_fungible_assets` + +Convenient function to transfer fungible asset to a recipient account that might not exist. +This would create the recipient account first to receive the fungible assets. + + +
public entry fun transfer_fungible_assets(from: &signer, metadata: object::Object<fungible_asset::Metadata>, to: address, amount: u64)
+
+ + + +
+Implementation + + +
public entry fun transfer_fungible_assets(from: &signer, metadata: Object<Metadata>, to: address, amount: u64) {
+    deposit_fungible_assets(to, primary_fungible_store::withdraw(from, metadata, amount));
+}
+
+ + + +
+ + + +## Function `deposit_fungible_assets` + +Convenient function to deposit fungible asset into a recipient account that might not exist. +This would create the recipient account first to receive the fungible assets. + + +
public fun deposit_fungible_assets(to: address, fa: fungible_asset::FungibleAsset)
+
+ + + +
+Implementation + + +
public fun deposit_fungible_assets(to: address, fa: FungibleAsset) {
+    if (!account::exists_at(to)) {
+        create_account(to);
+    };
+    primary_fungible_store::deposit(to, fa)
+}
+
+ + +
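A usage sketch for the new fungible-asset entry points, assuming an existing Metadata object address; the script and names below are illustrative only:

script {
    use aptos_framework::aptos_account;
    use aptos_framework::fungible_asset::Metadata;
    use aptos_framework::object;

    fun payout(sender: signer, metadata_addr: address, winner: address) {
        let metadata = object::address_to_object<Metadata>(metadata_addr);
        // Creates `winner`'s account and primary store if needed, then transfers 1000 units.
        aptos_account::transfer_fungible_assets(&sender, metadata, winner, 1000);
    }
}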
@@ -491,10 +592,11 @@ Set whether account can receiv if (std::features::module_event_migration_enabled()) { emit(DirectCoinTransferConfigUpdated { account: addr, new_allow_direct_transfers: allow }); + } else { + emit_event( + &mut direct_transfer_config.update_coin_transfer_events, + DirectCoinTransferConfigUpdatedEvent { new_allow_direct_transfers: allow }); }; - emit_event( - &mut direct_transfer_config.update_coin_transfer_events, - DirectCoinTransferConfigUpdatedEvent { new_allow_direct_transfers: allow }); } else { let direct_transfer_config = DirectTransferConfig { allow_arbitrary_coin_transfers: allow, @@ -502,10 +604,11 @@ Set whether account can receiv }; if (std::features::module_event_migration_enabled()) { emit(DirectCoinTransferConfigUpdated { account: addr, new_allow_direct_transfers: allow }); + } else { + emit_event( + &mut direct_transfer_config.update_coin_transfer_events, + DirectCoinTransferConfigUpdatedEvent { new_allow_direct_transfers: allow }); }; - emit_event( - &mut direct_transfer_config.update_coin_transfer_events, - DirectCoinTransferConfigUpdatedEvent { new_allow_direct_transfers: allow }); move_to(account, direct_transfer_config); }; } @@ -585,7 +688,7 @@ TODO: once migration is complete, rename to just "transfer_only" and make it an to transfer APT) - if we want to allow APT PFS without account itself -
fun fungible_transfer_only(source: &signer, to: address, amount: u64)
+
public(friend) entry fun fungible_transfer_only(source: &signer, to: address, amount: u64)
 
@@ -594,7 +697,7 @@ to transfer APT) - if we want to allow APT PFS without account itself Implementation -
fun fungible_transfer_only(
+
public(friend) entry fun fungible_transfer_only(
     source: &signer, to: address, amount: u64
 ) {
     let sender_store = ensure_primary_fungible_store_exists(signer::address_of(source));
@@ -605,7 +708,8 @@ to transfer APT) - if we want to allow APT PFS without account itself
     // as APT cannot be frozen or have dispatch, and PFS cannot be transferred
     // (PFS could potentially be burned. regular transfer would permanently unburn the store.
     // Ignoring the check here has the equivalent of unburning, transfers, and then burning again)
-    fungible_asset::deposit_internal(recipient_store, fungible_asset::withdraw_internal(sender_store, amount));
+    fungible_asset::withdraw_permission_check_by_address(source, sender_store, amount);
+    fungible_asset::unchecked_deposit(recipient_store, fungible_asset::unchecked_withdraw(sender_store, amount));
 }
 
@@ -639,14 +743,14 @@ Is balance from APT Primary FungibleStore at least the given amount - + -## Function `burn_from_fungible_store` +## Function `burn_from_fungible_store_for_gas` -Burn from APT Primary FungibleStore +Burn from APT Primary FungibleStore for gas charge -
public(friend) fun burn_from_fungible_store(ref: &fungible_asset::BurnRef, account: address, amount: u64)
+
public(friend) fun burn_from_fungible_store_for_gas(ref: &fungible_asset::BurnRef, account: address, amount: u64)
 
@@ -655,7 +759,7 @@ Burn from APT Primary FungibleStore Implementation -
public(friend) fun burn_from_fungible_store(
+
public(friend) fun burn_from_fungible_store_for_gas(
     ref: &BurnRef,
     account: address,
     amount: u64,
@@ -663,7 +767,7 @@ Burn from APT Primary FungibleStore
     // Skip burning if amount is zero. This shouldn't error out as it's called as part of transaction fee burning.
     if (amount != 0) {
         let store_addr = primary_fungible_store_address(account);
-        fungible_asset::address_burn_from(ref, store_addr, amount);
+        fungible_asset::address_burn_from_for_gas(ref, store_addr, amount);
     };
 }
 
@@ -831,7 +935,6 @@ Limit the address of auth_key is not @vm_reserved / @aptos_framework / @aptos_to
// This enforces high-level requirement 1:
 pragma aborts_if_is_partial;
 include CreateAccountAbortsIf;
-ensures exists<account::Account>(auth_key);
 
@@ -843,7 +946,6 @@ Limit the address of auth_key is not @vm_reserved / @aptos_framework / @aptos_to
schema CreateAccountAbortsIf {
     auth_key: address;
     aborts_if exists<account::Account>(auth_key);
-    aborts_if length_judgment(auth_key);
     aborts_if auth_key == @vm_reserved || auth_key == @aptos_framework || auth_key == @aptos_token;
 }
 
@@ -879,18 +981,14 @@ Limit the address of auth_key is not @vm_reserved / @aptos_framework / @aptos_to let account_addr_source = signer::address_of(source); let coin_store_source = global<coin::CoinStore<AptosCoin>>(account_addr_source); let balance_source = coin_store_source.coin.value; -requires forall i in 0..len(recipients): - recipients[i] != account_addr_source; -requires exists i in 0..len(recipients): - amounts[i] > 0; aborts_if len(recipients) != len(amounts); aborts_if exists i in 0..len(recipients): - !account::exists_at(recipients[i]) && length_judgment(recipients[i]); + !account::spec_exists_at(recipients[i]) && length_judgment(recipients[i]); aborts_if exists i in 0..len(recipients): - !account::exists_at(recipients[i]) && (recipients[i] == @vm_reserved || recipients[i] == @aptos_framework || recipients[i] == @aptos_token); + !account::spec_exists_at(recipients[i]) && (recipients[i] == @vm_reserved || recipients[i] == @aptos_framework || recipients[i] == @aptos_token); ensures forall i in 0..len(recipients): - (!account::exists_at(recipients[i]) ==> !length_judgment(recipients[i])) && - (!account::exists_at(recipients[i]) ==> (recipients[i] != @vm_reserved && recipients[i] != @aptos_framework && recipients[i] != @aptos_token)); + (!account::spec_exists_at(recipients[i]) ==> !length_judgment(recipients[i])) && + (!account::spec_exists_at(recipients[i]) ==> (recipients[i] != @vm_reserved && recipients[i] != @aptos_framework && recipients[i] != @aptos_token)); aborts_if exists i in 0..len(recipients): !exists<coin::CoinStore<AptosCoin>>(account_addr_source); aborts_if exists i in 0..len(recipients): @@ -900,9 +998,9 @@ Limit the address of auth_key is not @vm_reserved / @aptos_framework / @aptos_to aborts_if exists i in 0..len(recipients): exists<coin::CoinStore<AptosCoin>>(recipients[i]) && global<coin::CoinStore<AptosCoin>>(recipients[i]).frozen; aborts_if exists i in 0..len(recipients): - account::exists_at(recipients[i]) && !exists<coin::CoinStore<AptosCoin>>(recipients[i]) && global<account::Account>(recipients[i]).guid_creation_num + 2 >= account::MAX_GUID_CREATION_NUM; + account::spec_exists_at(recipients[i]) && !exists<coin::CoinStore<AptosCoin>>(recipients[i]) && global<account::Account>(recipients[i]).guid_creation_num + 2 >= account::MAX_GUID_CREATION_NUM; aborts_if exists i in 0..len(recipients): - account::exists_at(recipients[i]) && !exists<coin::CoinStore<AptosCoin>>(recipients[i]) && global<account::Account>(recipients[i]).guid_creation_num + 2 > MAX_U64; + account::spec_exists_at(recipients[i]) && !exists<coin::CoinStore<AptosCoin>>(recipients[i]) && global<account::Account>(recipients[i]).guid_creation_num + 2 > MAX_U64;
@@ -920,7 +1018,6 @@ Limit the address of auth_key is not @vm_reserved / @aptos_framework / @aptos_to
pragma verify = false;
 let account_addr_source = signer::address_of(source);
-requires account_addr_source != to;
 include CreateAccountTransferAbortsIf;
 include GuidAbortsIf<AptosCoin>;
 include WithdrawAbortsIf<AptosCoin>{from: source};
@@ -948,19 +1045,15 @@ Limit the address of auth_key is not @vm_reserved / @aptos_framework / @aptos_to
 let account_addr_source = signer::address_of(from);
 let coin_store_source = global<coin::CoinStore<CoinType>>(account_addr_source);
 let balance_source = coin_store_source.coin.value;
-requires forall i in 0..len(recipients):
-    recipients[i] != account_addr_source;
-requires exists i in 0..len(recipients):
-    amounts[i] > 0;
 // This enforces high-level requirement 7:
 aborts_if len(recipients) != len(amounts);
 aborts_if exists i in 0..len(recipients):
-        !account::exists_at(recipients[i]) && length_judgment(recipients[i]);
+        !account::spec_exists_at(recipients[i]) && length_judgment(recipients[i]);
 aborts_if exists i in 0..len(recipients):
-        !account::exists_at(recipients[i]) && (recipients[i] == @vm_reserved || recipients[i] == @aptos_framework || recipients[i] == @aptos_token);
+        !account::spec_exists_at(recipients[i]) && (recipients[i] == @vm_reserved || recipients[i] == @aptos_framework || recipients[i] == @aptos_token);
 ensures forall i in 0..len(recipients):
-        (!account::exists_at(recipients[i]) ==> !length_judgment(recipients[i])) &&
-            (!account::exists_at(recipients[i]) ==> (recipients[i] != @vm_reserved && recipients[i] != @aptos_framework && recipients[i] != @aptos_token));
+        (!account::spec_exists_at(recipients[i]) ==> !length_judgment(recipients[i])) &&
+            (!account::spec_exists_at(recipients[i]) ==> (recipients[i] != @vm_reserved && recipients[i] != @aptos_framework && recipients[i] != @aptos_token));
 aborts_if exists i in 0..len(recipients):
     !exists<coin::CoinStore<CoinType>>(account_addr_source);
 aborts_if exists i in 0..len(recipients):
@@ -970,11 +1063,9 @@ Limit the address of auth_key is not @vm_reserved / @aptos_framework / @aptos_to
 aborts_if exists i in 0..len(recipients):
     exists<coin::CoinStore<CoinType>>(recipients[i]) && global<coin::CoinStore<CoinType>>(recipients[i]).frozen;
 aborts_if exists i in 0..len(recipients):
-    account::exists_at(recipients[i]) && !exists<coin::CoinStore<CoinType>>(recipients[i]) && global<account::Account>(recipients[i]).guid_creation_num + 2 >= account::MAX_GUID_CREATION_NUM;
-aborts_if exists i in 0..len(recipients):
-    account::exists_at(recipients[i]) && !exists<coin::CoinStore<CoinType>>(recipients[i]) && global<account::Account>(recipients[i]).guid_creation_num + 2 > MAX_U64;
+    account::spec_exists_at(recipients[i]) && !exists<coin::CoinStore<CoinType>>(recipients[i]) && global<account::Account>(recipients[i]).guid_creation_num + 2 >= account::MAX_GUID_CREATION_NUM;
 aborts_if exists i in 0..len(recipients):
-    !coin::spec_is_account_registered<CoinType>(recipients[i]) && !type_info::spec_is_struct<CoinType>();
+    account::spec_exists_at(recipients[i]) && !exists<coin::CoinStore<CoinType>>(recipients[i]) && global<account::Account>(recipients[i]).guid_creation_num + 2 > MAX_U64;
 
@@ -992,7 +1083,6 @@ Limit the address of auth_key is not @vm_reserved / @aptos_framework / @aptos_to
pragma verify = false;
 let account_addr_source = signer::address_of(from);
-requires account_addr_source != to;
 include CreateAccountTransferAbortsIf;
 include WithdrawAbortsIf<CoinType>;
 include GuidAbortsIf<CoinType>;
@@ -1032,6 +1122,54 @@ Limit the address of auth_key is not @vm_reserved / @aptos_framework / @aptos_to
 
 
 
+
+
+### Function `batch_transfer_fungible_assets`
+
+
+
public entry fun batch_transfer_fungible_assets(from: &signer, metadata: object::Object<fungible_asset::Metadata>, recipients: vector<address>, amounts: vector<u64>)
+
+ + + + +
pragma verify = false;
+
+ + + + + +### Function `transfer_fungible_assets` + + +
public entry fun transfer_fungible_assets(from: &signer, metadata: object::Object<fungible_asset::Metadata>, to: address, amount: u64)
+
+ + + + +
pragma verify = false;
+
+ + + + + +### Function `deposit_fungible_assets` + + +
public fun deposit_fungible_assets(to: address, fa: fungible_asset::FungibleAsset)
+
+ + + + +
pragma verify = false;
+
+ + + ### Function `assert_account_exists` @@ -1043,7 +1181,7 @@ Limit the address of auth_key is not @vm_reserved / @aptos_framework / @aptos_to -
aborts_if !account::exists_at(addr);
+
aborts_if !account::spec_exists_at(addr);
 
@@ -1062,8 +1200,6 @@ Check if the AptosCoin under the address existed.
pragma aborts_if_is_partial;
-aborts_if !account::exists_at(addr);
-aborts_if !coin::spec_is_account_registered<AptosCoin>(addr);
 
@@ -1127,7 +1263,7 @@ Check if the AptosCoin under the address existed. ### Function `fungible_transfer_only` -
fun fungible_transfer_only(source: &signer, to: address, amount: u64)
+
public(friend) entry fun fungible_transfer_only(source: &signer, to: address, amount: u64)
 
@@ -1154,12 +1290,12 @@ Check if the AptosCoin under the address existed. - + -### Function `burn_from_fungible_store` +### Function `burn_from_fungible_store_for_gas` -
public(friend) fun burn_from_fungible_store(ref: &fungible_asset::BurnRef, account: address, amount: u64)
+
public(friend) fun burn_from_fungible_store_for_gas(ref: &fungible_asset::BurnRef, account: address, amount: u64)
 
@@ -1176,8 +1312,8 @@ Check if the AptosCoin under the address existed.
schema CreateAccountTransferAbortsIf {
     to: address;
-    aborts_if !account::exists_at(to) && length_judgment(to);
-    aborts_if !account::exists_at(to) && (to == @vm_reserved || to == @aptos_framework || to == @aptos_token);
+    aborts_if !account::spec_exists_at(to) && length_judgment(to);
+    aborts_if !account::spec_exists_at(to) && (to == @vm_reserved || to == @aptos_framework || to == @aptos_token);
 }
 
@@ -1208,8 +1344,8 @@ Check if the AptosCoin under the address existed.
schema GuidAbortsIf<CoinType> {
     to: address;
     let acc = global<account::Account>(to);
-    aborts_if account::exists_at(to) && !exists<coin::CoinStore<CoinType>>(to) && acc.guid_creation_num + 2 >= account::MAX_GUID_CREATION_NUM;
-    aborts_if account::exists_at(to) && !exists<coin::CoinStore<CoinType>>(to) && acc.guid_creation_num + 2 > MAX_U64;
+    aborts_if account::spec_exists_at(to) && !exists<coin::CoinStore<CoinType>>(to) && acc.guid_creation_num + 2 >= account::MAX_GUID_CREATION_NUM;
+    aborts_if account::spec_exists_at(to) && !exists<coin::CoinStore<CoinType>>(to) && acc.guid_creation_num + 2 > MAX_U64;
 }
 
@@ -1221,7 +1357,6 @@ Check if the AptosCoin under the address existed.
schema RegistCoinAbortsIf<CoinType> {
     to: address;
-    aborts_if !coin::spec_is_account_registered<CoinType>(to) && !type_info::spec_is_struct<CoinType>();
     aborts_if exists<aptos_framework::account::Account>(to);
     aborts_if type_info::type_of<CoinType>() != type_info::type_of<AptosCoin>();
 }
diff --git a/aptos-move/framework/aptos-framework/doc/aptos_coin.md b/aptos-move/framework/aptos-framework/doc/aptos_coin.md
index a2e9c548a67cb..14dc44ea1909d 100644
--- a/aptos-move/framework/aptos-framework/doc/aptos_coin.md
+++ b/aptos-move/framework/aptos-framework/doc/aptos_coin.md
@@ -17,7 +17,6 @@ modified from https://github.com/move-language/move/tree/main/language/documenta
 -  [Function `destroy_mint_cap`](#0x1_aptos_coin_destroy_mint_cap)
 -  [Function `configure_accounts_for_test`](#0x1_aptos_coin_configure_accounts_for_test)
 -  [Function `mint`](#0x1_aptos_coin_mint)
--  [Function `destroy_mint_capability_from`](#0x1_aptos_coin_destroy_mint_capability_from)
 -  [Function `delegate_mint_capability`](#0x1_aptos_coin_delegate_mint_capability)
 -  [Function `claim_mint_capability`](#0x1_aptos_coin_claim_mint_capability)
 -  [Function `find_delegation`](#0x1_aptos_coin_find_delegation)
@@ -210,8 +209,8 @@ Can only called during genesis to initialize the Aptos coin.
 
     let (burn_cap, freeze_cap, mint_cap) = coin::initialize_with_parallelizable_supply<AptosCoin>(
         aptos_framework,
-        string::utf8(b"Move Coin"),
-        string::utf8(b"MOVE"),
+        string::utf8(b"Aptos Coin"),
+        string::utf8(b"APT"),
         8, // decimals
         true, // monitor_supply
     );
@@ -261,7 +260,7 @@ Only called during genesis to destroy the aptos framework account's mint capabil
 and accounts have been initialized during genesis.
 
 
-
public(friend) fun destroy_mint_cap(account: &signer)
+
public(friend) fun destroy_mint_cap(aptos_framework: &signer)
 
@@ -270,8 +269,8 @@ and accounts have been initialized during genesis. Implementation -
public(friend) fun destroy_mint_cap(account: &signer) acquires MintCapStore {
-    system_addresses::assert_aptos_framework(account);
+
public(friend) fun destroy_mint_cap(aptos_framework: &signer) acquires MintCapStore {
+    system_addresses::assert_aptos_framework(aptos_framework);
     let MintCapStore { mint_cap } = move_from<MintCapStore>(@aptos_framework);
     coin::destroy_mint_cap(mint_cap);
 }
@@ -359,33 +358,6 @@ Create new coins and deposit them into dst_addr's account.
 
 
 
-
-
-
-
-## Function `destroy_mint_capability_from`
-
-Desroy the mint capability from the account.
-
-
-
public fun destroy_mint_capability_from(account: &signer, from: address)
-
- - - -
-Implementation - - -
public fun destroy_mint_capability_from(account: &signer, from: address) acquires MintCapStore {
-    system_addresses::assert_aptos_framework(account);
-    let MintCapStore { mint_cap } = move_from<MintCapStore>(from);
-    coin::destroy_mint_cap(mint_cap);
-}
-
- - -
@@ -406,11 +378,8 @@ Create delegated token for the address so the account could claim MintCapability
public entry fun delegate_mint_capability(account: signer, to: address) acquires Delegations {
-    system_addresses::assert_aptos_framework(&account);
-    let delegations = &mut borrow_global_mut<Delegations>(@aptos_framework).inner;
-    if (!exists<Delegations>(signer::address_of(&account))) {
-      move_to(&account, Delegations { inner: vector[] });
-    };
+    system_addresses::assert_core_resource(&account);
+    let delegations = &mut borrow_global_mut<Delegations>(@core_resources).inner;
     vector::for_each_ref(delegations, |element| {
         let element: &DelegatedMintCapability = element;
         assert!(element.to != to, error::invalid_argument(EALREADY_DELEGATED));
@@ -444,11 +413,11 @@ Claim the delegated mint capability and destroy the delegated token.
     let maybe_index = find_delegation(signer::address_of(account));
     assert!(option::is_some(&maybe_index), EDELEGATION_NOT_FOUND);
     let idx = *option::borrow(&maybe_index);
-    let delegations = &mut borrow_global_mut<Delegations>(@aptos_framework).inner;
+    let delegations = &mut borrow_global_mut<Delegations>(@core_resources).inner;
     let DelegatedMintCapability { to: _ } = vector::swap_remove(delegations, idx);
 
     // Make a copy of mint cap and give it to the specified account.
-    let mint_cap = borrow_global<MintCapStore>(@aptos_framework).mint_cap;
+    let mint_cap = borrow_global<MintCapStore>(@core_resources).mint_cap;
     move_to(account, MintCapStore { mint_cap });
 }
 
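With Delegations and the copied MintCapStore now living under @core_resources, the claim side of the testnet flow might look like the following test-only sketch (addresses and names illustrative, entry-function signatures assumed unchanged):

script {
    use aptos_framework::aptos_coin;

    // Run by the delegatee after the core resources account has executed
    // delegate_mint_capability: claim the copied MintCapability, then mint.
    fun claim_and_mint(claimer: signer, dst_addr: address, amount: u64) {
        aptos_coin::claim_mint_capability(&claimer);
        aptos_coin::mint(&claimer, dst_addr, amount);
    }
}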
@@ -473,7 +442,7 @@ Claim the delegated mint capability and destroy the delegated token.
fun find_delegation(addr: address): Option<u64> acquires Delegations {
-    let delegations = &borrow_global<Delegations>(@aptos_framework).inner;
+    let delegations = &borrow_global<Delegations>(@core_resources).inner;
     let i = 0;
     let len = vector::length(delegations);
     let index = option::none();
@@ -544,7 +513,7 @@ Claim the delegated mint capability and destroy the delegated token.
 
 
 
pragma verify = true;
-pragma aborts_if_is_strict;
+pragma aborts_if_is_partial;
 
@@ -560,10 +529,12 @@ Claim the delegated mint capability and destroy the delegated token. -
let addr = signer::address_of(aptos_framework);
+
pragma verify = false;
+aborts_if permissioned_signer::spec_is_permissioned_signer(aptos_framework);
+let addr = signer::address_of(aptos_framework);
 aborts_if addr != @aptos_framework;
-aborts_if !string::spec_internal_check_utf8(b"Move Coin");
-aborts_if !string::spec_internal_check_utf8(b"MOVE");
+aborts_if !string::spec_internal_check_utf8(b"Aptos Coin");
+aborts_if !string::spec_internal_check_utf8(b"APT");
 aborts_if exists<MintCapStore>(addr);
 aborts_if exists<coin::CoinInfo<AptosCoin>>(addr);
 aborts_if !exists<aggregator_factory::AggregatorFactory>(addr);
@@ -583,13 +554,13 @@ Claim the delegated mint capability and destroy the delegated token.
 ### Function `destroy_mint_cap`
 
 
-
public(friend) fun destroy_mint_cap(account: &signer)
+
public(friend) fun destroy_mint_cap(aptos_framework: &signer)
 
-
let addr = signer::address_of(account);
+
let addr = signer::address_of(aptos_framework);
 aborts_if addr != @aptos_framework;
 aborts_if !exists<MintCapStore>(@aptos_framework);
 
diff --git a/aptos-move/framework/aptos-framework/doc/aptos_governance.md b/aptos-move/framework/aptos-framework/doc/aptos_governance.md index 064d68833fd36..9acabaa0da98a 100644 --- a/aptos-move/framework/aptos-framework/doc/aptos_governance.md +++ b/aptos-move/framework/aptos-framework/doc/aptos_governance.md @@ -29,7 +29,10 @@ on a proposal multiple times as long as the total voting power of these votes do - [Struct `CreateProposal`](#0x1_aptos_governance_CreateProposal) - [Struct `Vote`](#0x1_aptos_governance_Vote) - [Struct `UpdateConfig`](#0x1_aptos_governance_UpdateConfig) +- [Struct `GovernancePermission`](#0x1_aptos_governance_GovernancePermission) - [Constants](#@Constants_0) +- [Function `check_governance_permission`](#0x1_aptos_governance_check_governance_permission) +- [Function `grant_permission`](#0x1_aptos_governance_grant_permission) - [Function `store_signer_cap`](#0x1_aptos_governance_store_signer_cap) - [Function `initialize`](#0x1_aptos_governance_initialize) - [Function `update_governance_config`](#0x1_aptos_governance_update_governance_config) @@ -39,6 +42,7 @@ on a proposal multiple times as long as the total voting power of these votes do - [Function `get_required_proposer_stake`](#0x1_aptos_governance_get_required_proposer_stake) - [Function `has_entirely_voted`](#0x1_aptos_governance_has_entirely_voted) - [Function `get_remaining_voting_power`](#0x1_aptos_governance_get_remaining_voting_power) +- [Function `assert_proposal_expiration`](#0x1_aptos_governance_assert_proposal_expiration) - [Function `create_proposal`](#0x1_aptos_governance_create_proposal) - [Function `create_proposal_v2`](#0x1_aptos_governance_create_proposal_v2) - [Function `create_proposal_v2_impl`](#0x1_aptos_governance_create_proposal_v2_impl) @@ -61,7 +65,6 @@ on a proposal multiple times as long as the total voting power of these votes do - [Function `get_signer`](#0x1_aptos_governance_get_signer) - [Function `create_proposal_metadata`](#0x1_aptos_governance_create_proposal_metadata) - [Function `assert_voting_initialization`](#0x1_aptos_governance_assert_voting_initialization) -- [Function `initialize_for_verification`](#0x1_aptos_governance_initialize_for_verification) - [Specification](#@Specification_1) - [High-level Requirements](#high-level-req) - [Module-level Specification](#module-level-spec) @@ -74,6 +77,7 @@ on a proposal multiple times as long as the total voting power of these votes do - [Function `get_required_proposer_stake`](#@Specification_1_get_required_proposer_stake) - [Function `has_entirely_voted`](#@Specification_1_has_entirely_voted) - [Function `get_remaining_voting_power`](#@Specification_1_get_remaining_voting_power) + - [Function `assert_proposal_expiration`](#@Specification_1_assert_proposal_expiration) - [Function `create_proposal`](#@Specification_1_create_proposal) - [Function `create_proposal_v2`](#@Specification_1_create_proposal_v2) - [Function `create_proposal_v2_impl`](#@Specification_1_create_proposal_v2_impl) @@ -96,7 +100,6 @@ on a proposal multiple times as long as the total voting power of these votes do - [Function `get_signer`](#@Specification_1_get_signer) - [Function `create_proposal_metadata`](#@Specification_1_create_proposal_metadata) - [Function `assert_voting_initialization`](#@Specification_1_assert_voting_initialization) - - [Function `initialize_for_verification`](#@Specification_1_initialize_for_verification)
use 0x1::account;
@@ -109,6 +112,7 @@ on a proposal multiple times as long as the total voting power of these votes do
 use 0x1::governance_proposal;
 use 0x1::math64;
 use 0x1::option;
+use 0x1::permissioned_signer;
 use 0x1::randomness_config;
 use 0x1::reconfiguration_with_dkg;
 use 0x1::signer;
@@ -642,6 +646,33 @@ Event emitted when the governance configs are updated.
 
 
 
+
+
+
+
+## Struct `GovernancePermission`
+
+
+
+
struct GovernancePermission has copy, drop, store
+
+ + + +
+Fields + + +
+
+dummy_field: bool +
+
+ +
+
+ +
@@ -738,6 +769,16 @@ The proposal in the argument is not a partial voting proposal. + + +Current permissioned signer cannot perform governance operations. + + +
const ENO_GOVERNANCE_PERMISSION: u64 = 16;
+
+ + + The specified stake pool must be part of the validator set @@ -758,6 +799,16 @@ Partial voting feature hasn't been properly initialized. + + +The proposal has expired. + + +
const EPROPOSAL_EXPIRED: u64 = 15;
+
+ + + Proposal is not ready to be resolved. Waiting on time or votes @@ -817,6 +868,59 @@ Proposal metadata attribute keys. + + +## Function `check_governance_permission` + +Permissions + + +
fun check_governance_permission(s: &signer)
+
+ + + +
+Implementation + + +
inline fun check_governance_permission(s: &signer) {
+    assert!(
+        permissioned_signer::check_permission_exists(s, GovernancePermission {}),
+        error::permission_denied(ENO_GOVERNANCE_PERMISSION),
+    );
+}
+
+ + + +
+ + + +## Function `grant_permission` + +Grant permission to perform governance operations on behalf of the master signer. + + +
public fun grant_permission(master: &signer, permissioned_signer: &signer)
+
+ + + +
+Implementation + + +
public fun grant_permission(master: &signer, permissioned_signer: &signer) {
+    permissioned_signer::authorize_unlimited(master, permissioned_signer, GovernancePermission {})
+}
+
+ + + +
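A minimal sketch of the delegation flow enabled by grant_permission, assuming the permissioned signer was created elsewhere; the wrapper module below is illustrative only:

module 0xcafe::governance_delegation {
    use aptos_framework::aptos_governance;

    // Called by the master signer together with a permissioned signer it controls.
    // Afterwards check_governance_permission() passes for `delegate`, so it can
    // go through the permission-gated proposal path.
    public fun allow_governance(master: &signer, delegate: &signer) {
        aptos_governance::grant_permission(master, delegate);
    }
}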
+ ## Function `store_signer_cap` @@ -885,6 +989,7 @@ This function is private because it's called directly from the vm. system_addresses::assert_aptos_framework(aptos_framework); voting::register<GovernanceProposal>(aptos_framework); + initialize_partial_voting(aptos_framework); move_to(aptos_framework, GovernanceConfig { voting_duration_secs, min_voting_threshold, @@ -946,16 +1051,17 @@ AptosGovernance. voting_duration_secs }, ) + } else { + let events = borrow_global_mut<GovernanceEvents>(@aptos_framework); + event::emit_event<UpdateConfigEvent>( + &mut events.update_config_events, + UpdateConfigEvent { + min_voting_threshold, + required_proposer_stake, + voting_duration_secs + }, + ); }; - let events = borrow_global_mut<GovernanceEvents>(@aptos_framework); - event::emit_event<UpdateConfigEvent>( - &mut events.update_config_events, - UpdateConfigEvent { - min_voting_threshold, - required_proposer_stake, - voting_duration_secs - }, - ); }
@@ -1147,17 +1253,50 @@ Note: a stake pool's voting power on a proposal could increase over time(e.g. re stake_pool, proposal_id, }; - let used_voting_power = 0u64; - if (features::partial_governance_voting_enabled()) { - let voting_records_v2 = borrow_global<VotingRecordsV2>(@aptos_framework); - used_voting_power = *smart_table::borrow_with_default(&voting_records_v2.votes, record_key, &0); - }; + let used_voting_power = *VotingRecordsV2[@aptos_framework].votes.borrow_with_default(record_key, &0); get_voting_power(stake_pool) - used_voting_power }
+ + + + +## Function `assert_proposal_expiration` + + + +
public fun assert_proposal_expiration(stake_pool: address, proposal_id: u64)
+
+ + + +
+Implementation + + +
public fun assert_proposal_expiration(stake_pool: address, proposal_id: u64) {
+    assert_voting_initialization();
+    let proposal_expiration = voting::get_proposal_expiration_secs<GovernanceProposal>(
+        @aptos_framework,
+        proposal_id
+    );
+    // The voter's stake needs to be locked up at least as long as the proposal's expiration.
+    assert!(
+        proposal_expiration <= stake::get_lockup_secs(stake_pool),
+        error::invalid_argument(EINSUFFICIENT_STAKE_LOCKUP),
+    );
+    assert!(
+        timestamp::now_seconds() <= proposal_expiration,
+        error::invalid_argument(EPROPOSAL_EXPIRED),
+    );
+}
+
+ + +
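Because assert_proposal_expiration is now public, callers can fail fast before entering the heavier vote path; a sketch, assuming vote() keeps its existing signature (names below are illustrative):

module 0xcafe::safe_vote {
    use aptos_framework::aptos_governance;

    public entry fun vote_if_open(
        voter: &signer,
        stake_pool: address,
        proposal_id: u64,
        should_pass: bool,
    ) {
        // Aborts with EPROPOSAL_EXPIRED or EINSUFFICIENT_STAKE_LOCKUP, mirroring
        // the checks vote() now delegates to this helper.
        aptos_governance::assert_proposal_expiration(stake_pool, proposal_id);
        aptos_governance::vote(voter, stake_pool, proposal_id, should_pass);
    }
}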
@@ -1261,6 +1400,7 @@ Return proposal_id when a proposal is successfully created. metadata_hash: vector<u8>, is_multi_step_proposal: bool, ): u64 acquires GovernanceConfig, GovernanceEvents { + check_governance_permission(proposer); let proposer_address = signer::address_of(proposer); assert!( stake::get_delegated_voter(stake_pool) == proposer_address, @@ -1320,18 +1460,19 @@ Return proposal_id when a proposal is successfully created. proposal_metadata, }, ); + } else { + let events = borrow_global_mut<GovernanceEvents>(@aptos_framework); + event::emit_event<CreateProposalEvent>( + &mut events.create_proposal_events, + CreateProposalEvent { + proposal_id, + proposer: proposer_address, + stake_pool, + execution_hash, + proposal_metadata, + }, + ); }; - let events = borrow_global_mut<GovernanceEvents>(@aptos_framework); - event::emit_event<CreateProposalEvent>( - &mut events.create_proposal_events, - CreateProposalEvent { - proposal_id, - proposer: proposer_address, - stake_pool, - execution_hash, - proposal_metadata, - }, - ); proposal_id }
@@ -1492,18 +1633,11 @@ cannot vote on the proposal even after partial governance voting is enabled. voting_power: u64, should_pass: bool, ) acquires ApprovedExecutionHashes, VotingRecords, VotingRecordsV2, GovernanceEvents { + permissioned_signer::assert_master_signer(voter); let voter_address = signer::address_of(voter); assert!(stake::get_delegated_voter(stake_pool) == voter_address, error::invalid_argument(ENOT_DELEGATED_VOTER)); - // The voter's stake needs to be locked up at least as long as the proposal's expiration. - let proposal_expiration = voting::get_proposal_expiration_secs<GovernanceProposal>( - @aptos_framework, - proposal_id - ); - assert!( - stake::get_lockup_secs(stake_pool) >= proposal_expiration, - error::invalid_argument(EINSUFFICIENT_STAKE_LOCKUP), - ); + assert_proposal_expiration(stake_pool, proposal_id); // If a stake pool has already voted on a proposal before partial governance voting is enabled, // `get_remaining_voting_power` returns 0. @@ -1525,18 +1659,9 @@ cannot vote on the proposal even after partial governance voting is enabled. stake_pool, proposal_id, }; - if (features::partial_governance_voting_enabled()) { - let voting_records_v2 = borrow_global_mut<VotingRecordsV2>(@aptos_framework); - let used_voting_power = smart_table::borrow_mut_with_default(&mut voting_records_v2.votes, record_key, 0); - // This calculation should never overflow because the used voting cannot exceed the total voting power of this stake pool. - *used_voting_power = *used_voting_power + voting_power; - } else { - let voting_records = borrow_global_mut<VotingRecords>(@aptos_framework); - assert!( - !table::contains(&voting_records.votes, record_key), - error::invalid_argument(EALREADY_VOTED)); - table::add(&mut voting_records.votes, record_key, true); - }; + let used_voting_power = VotingRecordsV2[@aptos_framework].votes.borrow_mut_with_default(record_key, 0); + // This calculation should never overflow because the used voting cannot exceed the total voting power of this stake pool. + *used_voting_power += voting_power; if (std::features::module_event_migration_enabled()) { event::emit( @@ -1548,18 +1673,19 @@ cannot vote on the proposal even after partial governance voting is enabled. should_pass, }, ); + } else { + let events = &mut GovernanceEvents[@aptos_framework]; + event::emit_event( + &mut events.vote_events, + VoteEvent { + proposal_id, + voter: voter_address, + stake_pool, + num_votes: voting_power, + should_pass, + }, + ); }; - let events = borrow_global_mut<GovernanceEvents>(@aptos_framework); - event::emit_event<VoteEvent>( - &mut events.vote_events, - VoteEvent { - proposal_id, - voter: voter_address, - stake_pool, - num_votes: voting_power, - should_pass, - }, - ); let proposal_state = voting::get_proposal_state<GovernanceProposal>(@aptos_framework, proposal_id); if (proposal_state == PROPOSAL_STATE_SUCCEEDED) { @@ -1885,6 +2011,8 @@ Only called in testnet where the core resources account exists and has been gran
public fun get_signer_testnet_only(
     core_resources: &signer, signer_address: address): signer acquires GovernanceResponsbility {
     system_addresses::assert_core_resource(core_resources);
+    // Core resources account only has mint capability in tests/testnets.
+    assert!(aptos_coin::has_mint_capability(core_resources), error::unauthenticated(EUNAUTHORIZED));
     get_signer(signer_address)
 }
 
@@ -2004,39 +2132,7 @@ Return a signer for making changes to 0x1 as part of on-chain governance proposa
fun assert_voting_initialization() {
-    if (features::partial_governance_voting_enabled()) {
-        assert!(exists<VotingRecordsV2>(@aptos_framework), error::invalid_state(EPARTIAL_VOTING_NOT_INITIALIZED));
-    };
-}
-
- - - - - - - -## Function `initialize_for_verification` - - - -
#[verify_only]
-public fun initialize_for_verification(aptos_framework: &signer, min_voting_threshold: u128, required_proposer_stake: u64, voting_duration_secs: u64)
-
- - - -
-Implementation - - -
public fun initialize_for_verification(
-    aptos_framework: &signer,
-    min_voting_threshold: u128,
-    required_proposer_stake: u64,
-    voting_duration_secs: u64,
-) {
-    initialize(aptos_framework, min_voting_threshold, required_proposer_stake, voting_duration_secs);
+    assert!(exists<VotingRecordsV2>(@aptos_framework), error::invalid_state(EPARTIAL_VOTING_NOT_INITIALIZED));
 }
 
@@ -2102,8 +2198,21 @@ Return a signer for making changes to 0x1 as part of on-chain governance proposa ### Module-level Specification -
pragma verify = true;
-pragma aborts_if_is_strict;
+
pragma verify = false;
+pragma aborts_if_is_partial;
+
+ + + + + + + +
schema AbortsIfPermissionedSigner {
+    s: signer;
+    let perm = GovernancePermission {};
+    aborts_if !permissioned_signer::spec_check_permission_exists(s, perm);
+}
 
@@ -2146,12 +2255,10 @@ The signer must have an Account. Limit addition overflow. -
let addr = signer::address_of(aptos_framework);
+
pragma aborts_if_is_partial;
+let addr = signer::address_of(aptos_framework);
 let register_account = global<account::Account>(addr);
 aborts_if exists<voting::VotingForum<GovernanceProposal>>(addr);
-aborts_if !exists<account::Account>(addr);
-aborts_if register_account.guid_creation_num + 7 > MAX_U64;
-aborts_if register_account.guid_creation_num + 7 >= account::MAX_GUID_CREATION_NUM;
 aborts_if !type_info::spec_is_struct<GovernanceProposal>();
 include InitializeAbortIf;
 ensures exists<voting::VotingForum<governance_proposal::GovernanceProposal>>(addr);
@@ -2159,6 +2266,7 @@ Limit addition overflow.
 ensures exists<GovernanceEvents>(addr);
 ensures exists<VotingRecords>(addr);
 ensures exists<ApprovedExecutionHashes>(addr);
+ensures exists<VotingRecordsV2>(addr);
 
@@ -2181,7 +2289,9 @@ Address @aptos_framework must exist GovernanceConfig and GovernanceEvents. let post new_governance_config = global<GovernanceConfig>(@aptos_framework); aborts_if addr != @aptos_framework; aborts_if !exists<GovernanceConfig>(@aptos_framework); -aborts_if !exists<GovernanceEvents>(@aptos_framework); +aborts_if !features::spec_is_enabled(features::MODULE_EVENT_MIGRATION) && !exists<GovernanceEvents>( + @aptos_framework +); modifies global<GovernanceConfig>(addr); ensures new_governance_config.voting_duration_secs == voting_duration_secs; ensures new_governance_config.min_voting_threshold == min_voting_threshold; @@ -2228,7 +2338,7 @@ Abort if structs have already been created. aborts_if exists<GovernanceEvents>(addr); aborts_if exists<VotingRecords>(addr); aborts_if exists<ApprovedExecutionHashes>(addr); - aborts_if !exists<account::Account>(addr); + aborts_if exists<VotingRecordsV2>(addr); }
@@ -2325,7 +2435,7 @@ Abort if structs have already been created. -
aborts_if features::spec_partial_governance_voting_enabled() && !exists<VotingRecordsV2>(@aptos_framework);
+
aborts_if !exists<VotingRecordsV2>(@aptos_framework);
 include voting::AbortsIfNotContainProposalID<GovernanceProposal> {
     voting_forum_address: @aptos_framework
 };
@@ -2352,8 +2462,7 @@ Abort if structs have already been created.
 } else {
     0
 };
-aborts_if !remain_zero_1_cond && !entirely_voted && features::spec_partial_governance_voting_enabled() &&
-    used_voting_power > 0 && voting_power < used_voting_power;
+aborts_if !remain_zero_1_cond && !entirely_voted && used_voting_power > 0 && voting_power < used_voting_power;
 ensures result == spec_get_remaining_voting_power(stake_pool, proposal_id);
 
@@ -2384,8 +2493,6 @@ Abort if structs have already been created. 0 } else if (entirely_voted) { 0 - } else if (!features::spec_partial_governance_voting_enabled()) { - voting_power } else { voting_power - used_voting_power } @@ -2425,6 +2532,28 @@ Abort if structs have already been created. + + +### Function `assert_proposal_expiration` + + +
public fun assert_proposal_expiration(stake_pool: address, proposal_id: u64)
+
+ + + + +
include VotingInitializationAbortIfs;
+include voting::AbortsIfNotContainProposalID<GovernanceProposal>{voting_forum_address: @aptos_framework};
+let proposal_expiration = voting::spec_get_proposal_expiration_secs<GovernanceProposal>(@aptos_framework, proposal_id);
+aborts_if !stake::stake_pool_exists(stake_pool);
+aborts_if proposal_expiration > stake::spec_get_lockup_secs(stake_pool);
+aborts_if !exists<timestamp::CurrentTimeMicroseconds>(@aptos_framework);
+aborts_if timestamp::now_seconds() > proposal_expiration;
+
+ + + ### Function `create_proposal` @@ -2479,6 +2608,80 @@ The same as spec of + + +
schema CreateProposalAbortsIf {
+    proposer: &signer;
+    stake_pool: address;
+    execution_hash: vector<u8>;
+    metadata_location: vector<u8>;
+    metadata_hash: vector<u8>;
+    include VotingGetDelegatedVoterAbortsIf { sign: proposer };
+    include AbortsIfNotGovernanceConfig;
+    include GetVotingPowerAbortsIf { pool_address: stake_pool };
+    let staking_config = global<staking_config::StakingConfig>(@aptos_framework);
+    let allow_validator_set_change = staking_config.allow_validator_set_change;
+    let stake_pool_res = global<stake::StakePool>(stake_pool);
+    let stake_balance_0 = stake_pool_res.active.value + stake_pool_res.pending_active.value + stake_pool_res.pending_inactive.value;
+    let stake_balance_1 = stake_pool_res.active.value + stake_pool_res.pending_inactive.value;
+    let stake_balance_2 = 0;
+    let governance_config = global<GovernanceConfig>(@aptos_framework);
+    let required_proposer_stake = governance_config.required_proposer_stake;
+    // This enforces high-level requirement 2:
+    aborts_if allow_validator_set_change && stake_balance_0 < required_proposer_stake;
+    aborts_if !allow_validator_set_change && stake::spec_is_current_epoch_validator(stake_pool) && stake_balance_1 < required_proposer_stake;
+    aborts_if !allow_validator_set_change && !stake::spec_is_current_epoch_validator(stake_pool) && stake_balance_2 < required_proposer_stake;
+    aborts_if !exists<timestamp::CurrentTimeMicroseconds>(@aptos_framework);
+    let current_time = timestamp::spec_now_seconds();
+    let proposal_expiration = current_time + governance_config.voting_duration_secs;
+    aborts_if stake_pool_res.locked_until_secs < proposal_expiration;
+    include CreateProposalMetadataAbortsIf;
+    let addr = aptos_std::type_info::type_of<AptosCoin>().account_address;
+    aborts_if !exists<coin::CoinInfo<AptosCoin>>(addr);
+    let maybe_supply = global<coin::CoinInfo<AptosCoin>>(addr).supply;
+    let supply = option::spec_borrow(maybe_supply);
+    let total_supply = aptos_framework::optional_aggregator::optional_aggregator_value(supply);
+    let early_resolution_vote_threshold_value = total_supply / 2 + 1;
+    aborts_if option::spec_is_some(maybe_supply) && governance_config.min_voting_threshold > early_resolution_vote_threshold_value;
+    aborts_if len(execution_hash) == 0;
+    aborts_if !exists<voting::VotingForum<GovernanceProposal>>(@aptos_framework);
+    let voting_forum = global<voting::VotingForum<GovernanceProposal>>(@aptos_framework);
+    let proposal_id = voting_forum.next_proposal_id;
+    aborts_if proposal_id + 1 > MAX_U64;
+    let post post_voting_forum = global<voting::VotingForum<GovernanceProposal>>(@aptos_framework);
+    let post post_next_proposal_id = post_voting_forum.next_proposal_id;
+    ensures post_next_proposal_id == proposal_id + 1;
+    aborts_if !string::spec_internal_check_utf8(voting::IS_MULTI_STEP_PROPOSAL_KEY);
+    aborts_if !string::spec_internal_check_utf8(voting::IS_MULTI_STEP_PROPOSAL_IN_EXECUTION_KEY);
+    aborts_if table::spec_contains(voting_forum.proposals,proposal_id);
+    ensures table::spec_contains(post_voting_forum.proposals, proposal_id);
+    aborts_if !exists<GovernanceEvents>(@aptos_framework);
+}
+
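A minimal sketch of the stake-balance case split encoded by the three `stake_balance_*` bindings and their paired `aborts_if` clauses above, written as a hypothetical helper module for illustration only (the module and function names are not part of this change): when validator-set changes are allowed every stake bucket counts toward `required_proposer_stake`, otherwise only a current-epoch validator's active plus pending-inactive stake counts, and any other pool contributes nothing.

```move
module 0x1::proposer_stake_example {
    // Mirrors the case split in CreateProposalAbortsIf.
    public fun eligible_proposer_stake(
        allow_validator_set_change: bool,
        is_current_epoch_validator: bool,
        active: u64,
        pending_active: u64,
        pending_inactive: u64,
    ): u64 {
        if (allow_validator_set_change) {
            // stake_balance_0: every bucket counts.
            active + pending_active + pending_inactive
        } else if (is_current_epoch_validator) {
            // stake_balance_1: pending_active is excluded.
            active + pending_inactive
        } else {
            // stake_balance_2: a non-validator pool cannot satisfy the threshold.
            0
        }
    }
}
```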
+ + + + + + + +
schema VotingGetDelegatedVoterAbortsIf {
+    stake_pool: address;
+    sign: signer;
+    let addr = signer::address_of(sign);
+    let stake_pool_res = global<stake::StakePool>(stake_pool);
+    aborts_if !exists<stake::StakePool>(stake_pool);
+    aborts_if stake_pool_res.delegated_voter != addr;
+}
+
+
+
@@ -2611,8 +2814,7 @@ Address @aptos_framework must exist VotingRecordsV2 if partial_governance_voting
 } else {
     0
 };
-aborts_if !remain_zero_1_cond && !entirely_voted && features::spec_partial_governance_voting_enabled() &&
-    used_voting_power > 0 && spec_voting_power < used_voting_power;
+aborts_if !remain_zero_1_cond && !entirely_voted && used_voting_power > 0 && spec_voting_power < used_voting_power;
 let remaining_power = spec_get_remaining_voting_power(stake_pool, proposal_id);
 let real_voting_power = min(voting_power, remaining_power);
 aborts_if !(real_voting_power > 0);
@@ -2641,8 +2843,7 @@ Address @aptos_framework must exist VotingRecordsV2 if partial_governance_voting
 let key = utf8(voting::RESOLVABLE_TIME_METADATA_KEY);
 ensures simple_map::spec_contains_key(post_proposal.metadata, key);
 ensures simple_map::spec_get(post_proposal.metadata, key) == std::bcs::to_bytes(timestamp::now_seconds());
-aborts_if features::spec_partial_governance_voting_enabled() && used_voting_power + real_voting_power > MAX_U64;
-aborts_if !features::spec_partial_governance_voting_enabled() && table::spec_contains(voting_records.votes, record_key);
+aborts_if used_voting_power + real_voting_power > MAX_U64;
 aborts_if !exists<GovernanceEvents>(@aptos_framework);
 let early_resolution_threshold = option::spec_borrow(proposal.early_resolution_vote_threshold);
 let is_voting_period_over = timestamp::spec_now_seconds() > proposal_expiration;
@@ -2697,7 +2898,7 @@ Address @aptos_framework must exist VotingRecordsV2 if partial_governance_voting
 };
 ensures proposal_state_successed ==> simple_map::spec_contains_key(post_approved_hashes.hashes, proposal_id) &&
     simple_map::spec_get(post_approved_hashes.hashes, proposal_id) == execution_hash;
-aborts_if features::spec_partial_governance_voting_enabled() && !exists<VotingRecordsV2>(@aptos_framework);
+aborts_if !exists<VotingRecordsV2>(@aptos_framework);
 }
@@ -2927,9 +3128,7 @@ Address @aptos_framework must exist ApprovedExecutionHashes and GovernancePropos
     framework: aptos_framework
 };
 include stake::GetReconfigStartTimeRequirement;
-include transaction_fee::RequiresCollectedFeesPerValueLeqBlockAptosSupply;
 requires chain_status::is_operating();
-requires exists<stake::ValidatorFees>(@aptos_framework);
 requires exists<CoinInfo<AptosCoin>>(@aptos_framework);
 requires exists<staking_config::StakingRewardsConfig>(@aptos_framework);
 include staking_config::StakingRewardsConfigRequirement;
@@ -2962,7 +3161,7 @@ Address @aptos_framework must exist ApprovedExecutionHashes and GovernancePropos
schema VotingInitializationAbortIfs {
-    aborts_if features::spec_partial_governance_voting_enabled() && !exists<VotingRecordsV2>(@aptos_framework);
+    aborts_if !exists<VotingRecordsV2>(@aptos_framework);
 }
 
@@ -3004,9 +3203,7 @@ Address @aptos_framework must exist GovernanceConfig and GovernanceEvents.
     framework: aptos_framework
 };
 include stake::GetReconfigStartTimeRequirement;
-include transaction_fee::RequiresCollectedFeesPerValueLeqBlockAptosSupply;
 requires chain_status::is_operating();
-requires exists<stake::ValidatorFees>(@aptos_framework);
 requires exists<CoinInfo<AptosCoin>>(@aptos_framework);
 requires exists<staking_config::StakingRewardsConfig>(@aptos_framework);
 include staking_config::StakingRewardsConfigRequirement;
@@ -3163,22 +3360,4 @@ pool_address must exist in StakePool.
- - - -### Function `initialize_for_verification` - - -
#[verify_only]
-public fun initialize_for_verification(aptos_framework: &signer, min_voting_threshold: u128, required_proposer_stake: u64, voting_duration_secs: u64)
-
- - -verify_only - - -
pragma verify = false;
-
-
-
 [move-book]: https://aptos.dev/move/book/SUMMARY
diff --git a/aptos-move/framework/aptos-framework/doc/atomic_bridge.md b/aptos-move/framework/aptos-framework/doc/atomic_bridge.md
deleted file mode 100644
index 2c36f8755ac1a..0000000000000
--- a/aptos-move/framework/aptos-framework/doc/atomic_bridge.md
+++ /dev/null
@@ -1,2876 +0,0 @@
-
-
-
-# Module `0x1::atomic_bridge_store`
-
-
-
-- [Struct `AddressPair`](#0x1_atomic_bridge_store_AddressPair)
-- [Resource `SmartTableWrapper`](#0x1_atomic_bridge_store_SmartTableWrapper)
-- [Struct `BridgeTransferDetails`](#0x1_atomic_bridge_store_BridgeTransferDetails)
-- [Resource `Nonce`](#0x1_atomic_bridge_store_Nonce)
-- [Constants](#@Constants_0)
-- [Function `initialize`](#0x1_atomic_bridge_store_initialize)
-- [Function `now`](#0x1_atomic_bridge_store_now)
-- [Function `create_time_lock`](#0x1_atomic_bridge_store_create_time_lock)
-- [Function `create_details`](#0x1_atomic_bridge_store_create_details)
-- [Function `add`](#0x1_atomic_bridge_store_add)
-- [Function `assert_min_time_lock`](#0x1_atomic_bridge_store_assert_min_time_lock)
-- [Function `assert_pending`](#0x1_atomic_bridge_store_assert_pending)
-- [Function `assert_valid_hash_lock`](#0x1_atomic_bridge_store_assert_valid_hash_lock)
-- [Function `assert_valid_bridge_transfer_id`](#0x1_atomic_bridge_store_assert_valid_bridge_transfer_id)
-- [Function `create_hashlock`](#0x1_atomic_bridge_store_create_hashlock)
-- [Function `assert_correct_hash_lock`](#0x1_atomic_bridge_store_assert_correct_hash_lock)
-- [Function `assert_timed_out_lock`](#0x1_atomic_bridge_store_assert_timed_out_lock)
-- [Function `assert_within_timelock`](#0x1_atomic_bridge_store_assert_within_timelock)
-- [Function `complete`](#0x1_atomic_bridge_store_complete)
-- [Function `cancel`](#0x1_atomic_bridge_store_cancel)
-- [Function `complete_details`](#0x1_atomic_bridge_store_complete_details)
-- [Function `complete_transfer`](#0x1_atomic_bridge_store_complete_transfer)
-- [Function `cancel_details`](#0x1_atomic_bridge_store_cancel_details)
-- [Function `cancel_transfer`](#0x1_atomic_bridge_store_cancel_transfer)
-- [Function `bridge_transfer_id`](#0x1_atomic_bridge_store_bridge_transfer_id)
-- [Function `get_bridge_transfer_details_initiator`](#0x1_atomic_bridge_store_get_bridge_transfer_details_initiator)
-- [Function `get_bridge_transfer_details_counterparty`](#0x1_atomic_bridge_store_get_bridge_transfer_details_counterparty)
-- [Function `get_bridge_transfer_details`](#0x1_atomic_bridge_store_get_bridge_transfer_details)
-- [Specification](#@Specification_1)
-    - [Function `initialize`](#@Specification_1_initialize)
-    - [Function `create_time_lock`](#@Specification_1_create_time_lock)
-    - [Function `create_details`](#@Specification_1_create_details)
-    - [Function `add`](#@Specification_1_add)
-    - [Function `create_hashlock`](#@Specification_1_create_hashlock)
-    - [Function `complete`](#@Specification_1_complete)
-    - [Function `cancel`](#@Specification_1_cancel)
-    - [Function `complete_details`](#@Specification_1_complete_details)
-    - [Function `complete_transfer`](#@Specification_1_complete_transfer)
-    - [Function `cancel_details`](#@Specification_1_cancel_details)
-
-
use 0x1::aptos_hash;
-use 0x1::bcs;
-use 0x1::ethereum;
-use 0x1::features;
-use 0x1::smart_table;
-use 0x1::system_addresses;
-use 0x1::timestamp;
-use 0x1::vector;
-
- - - - - -## Struct `AddressPair` - - - -
struct AddressPair<Initiator: store, Recipient: store> has copy, store
-
- - - -
-Fields - - -
-
-initiator: Initiator -
-
- -
-
-recipient: Recipient -
-
- -
-
- - -
- - - -## Resource `SmartTableWrapper` - -A smart table wrapper - - -
struct SmartTableWrapper<K, V> has store, key
-
- - - -
-Fields - - -
-
-inner: smart_table::SmartTable<K, V> -
-
- -
-
- - -
- - - -## Struct `BridgeTransferDetails` - -Details on the transfer - - -
struct BridgeTransferDetails<Initiator: store, Recipient: store> has copy, store
-
- - - -
-Fields - - -
-
-addresses: atomic_bridge_store::AddressPair<Initiator, Recipient> -
-
- -
-
-amount: u64 -
-
- -
-
-hash_lock: vector<u8> -
-
- -
-
-time_lock: u64 -
-
- -
-
-state: u8 -
-
- -
-
- - -
- - - -## Resource `Nonce` - - - -
struct Nonce has key
-
- - - -
-Fields - - -
-
-inner: u64 -
-
- -
-
- - -
- - - -## Constants - - - - - - -
const MAX_U64: u64 = 18446744073709551615;
-
- - - - - - - -
const EATOMIC_BRIDGE_NOT_ENABLED: u64 = 9;
-
- - - - - - - -
const CANCELLED_TRANSACTION: u8 = 3;
-
- - - - - - - -
const COMPLETED_TRANSACTION: u8 = 2;
-
- - - - - - - -
const EEXPIRED: u64 = 3;
-
- - - - - - - -
const EINVALID_BRIDGE_TRANSFER_ID: u64 = 8;
-
- - - - - - - -
const EINVALID_HASH_LOCK: u64 = 5;
-
- - - - - -Error codes - - -
const EINVALID_PRE_IMAGE: u64 = 1;
-
- - - - - - - -
const EINVALID_TIME_LOCK: u64 = 6;
-
- - - - - - - -
const ENOT_EXPIRED: u64 = 4;
-
- - - - - - - -
const ENOT_PENDING_TRANSACTION: u64 = 2;
-
- - - - - - - -
const EZERO_AMOUNT: u64 = 7;
-
- - - - - -Minimum time lock of 1 second - - -
const MIN_TIME_LOCK: u64 = 1;
-
- - - - - -Transaction states - - -
const PENDING_TRANSACTION: u8 = 1;
-
- - - - - -## Function `initialize` - -Initializes the initiators and counterparties tables and nonce. - -@param aptos_framework The signer for Aptos framework. - - -
public fun initialize(aptos_framework: &signer)
-
- - - -
-Implementation - - -
public fun initialize(aptos_framework: &signer) {
-    system_addresses::assert_aptos_framework(aptos_framework);
-    move_to(aptos_framework, Nonce {
-        inner: 0,
-    });
-
-    let initiators = SmartTableWrapper<vector<u8>, BridgeTransferDetails<address, EthereumAddress>> {
-        inner: smart_table::new(),
-    };
-
-    move_to(aptos_framework, initiators);
-
-    let counterparties = SmartTableWrapper<vector<u8>, BridgeTransferDetails<EthereumAddress, address>> {
-        inner: smart_table::new(),
-    };
-
-    move_to(aptos_framework, counterparties);
-}
-
- - - -
- - - -## Function `now` - -Returns the current time in seconds. - -@return Current timestamp in seconds. - - -
fun now(): u64
-
- - - -
-Implementation - - -
fun now() : u64 {
-    timestamp::now_seconds()
-}
-
- - - -
- - - -## Function `create_time_lock` - -Creates a time lock by adding a duration to the current time. - -@param lock The duration to lock. -@return The calculated time lock. -@abort If lock is not above MIN_TIME_LOCK - - -
public(friend) fun create_time_lock(time_lock: u64): u64
-
- - - -
-Implementation - - -
public(friend) fun create_time_lock(time_lock: u64) : u64 {
-    assert_min_time_lock(time_lock);
-    now() + time_lock
-}
-
- - - -
- - - -## Function `create_details` - -Creates bridge transfer details with validation. - -@param initiator The initiating party of the transfer. -@param recipient The receiving party of the transfer. -@param amount The amount to be transferred. -@param hash_lock The hash lock for the transfer. -@param time_lock The time lock for the transfer. -@return A BridgeTransferDetails object. -@abort If the amount is zero or locks are invalid. - - -
public(friend) fun create_details<Initiator: store, Recipient: store>(initiator: Initiator, recipient: Recipient, amount: u64, hash_lock: vector<u8>, time_lock: u64): atomic_bridge_store::BridgeTransferDetails<Initiator, Recipient>
-
- - - -
-Implementation - - -
public(friend) fun create_details<Initiator: store, Recipient: store>(initiator: Initiator, recipient: Recipient, amount: u64, hash_lock: vector<u8>, time_lock: u64)
-    : BridgeTransferDetails<Initiator, Recipient> {
-    assert!(amount > 0, EZERO_AMOUNT);
-    assert_valid_hash_lock(&hash_lock);
-    time_lock = create_time_lock(time_lock);
-
-    BridgeTransferDetails {
-        addresses: AddressPair {
-            initiator,
-            recipient
-        },
-        amount,
-        hash_lock,
-        time_lock,
-        state: PENDING_TRANSACTION,
-    }
-}
-
- - - -
- - - -## Function `add` - -Record details of a transfer - -@param bridge_transfer_id Bridge transfer ID. -@param details The bridge transfer details - - -
public(friend) fun add<Initiator: store, Recipient: store>(bridge_transfer_id: vector<u8>, details: atomic_bridge_store::BridgeTransferDetails<Initiator, Recipient>)
-
- - - -
-Implementation - - -
public(friend) fun add<Initiator: store, Recipient: store>(bridge_transfer_id: vector<u8>, details: BridgeTransferDetails<Initiator, Recipient>) acquires SmartTableWrapper {
-    assert!(features::abort_atomic_bridge_enabled(), EATOMIC_BRIDGE_NOT_ENABLED);
-
-    assert_valid_bridge_transfer_id(&bridge_transfer_id);
-    let table = borrow_global_mut<SmartTableWrapper<vector<u8>, BridgeTransferDetails<Initiator, Recipient>>>(@aptos_framework);
-    smart_table::add(&mut table.inner, bridge_transfer_id, details);
-}
-
- - - -
- - - -## Function `assert_min_time_lock` - -Asserts that the time lock is valid. - -@param time_lock -@abort If the time lock is invalid. - - -
fun assert_min_time_lock(time_lock: u64)
-
- - - -
-Implementation - - -
fun assert_min_time_lock(time_lock: u64) {
-    assert!(time_lock >= MIN_TIME_LOCK, EINVALID_TIME_LOCK);
-}
-
- - - -
- - - -## Function `assert_pending` - -Asserts that the details state is pending. - -@param details The bridge transfer details to check. -@abort If the state is not pending. - - -
fun assert_pending<Initiator: store, Recipient: store>(details: &atomic_bridge_store::BridgeTransferDetails<Initiator, Recipient>)
-
- - - -
-Implementation - - -
fun assert_pending<Initiator: store, Recipient: store>(details: &BridgeTransferDetails<Initiator, Recipient>) {
-    assert!(details.state == PENDING_TRANSACTION, ENOT_PENDING_TRANSACTION)
-}
-
- - - -
- - - -## Function `assert_valid_hash_lock` - -Asserts that the hash lock is valid. - -@param hash_lock The hash lock to validate. -@abort If the hash lock is invalid. - - -
fun assert_valid_hash_lock(hash_lock: &vector<u8>)
-
- - - -
-Implementation - - -
fun assert_valid_hash_lock(hash_lock: &vector<u8>) {
-    assert!(vector::length(hash_lock) == 32, EINVALID_HASH_LOCK);
-}
-
- - - -
- - - -## Function `assert_valid_bridge_transfer_id` - -Asserts that the bridge transfer ID is valid. - -@param bridge_transfer_id The bridge transfer ID to validate. -@abort If the ID is invalid. - - -
public(friend) fun assert_valid_bridge_transfer_id(bridge_transfer_id: &vector<u8>)
-
- - - -
-Implementation - - -
public(friend) fun assert_valid_bridge_transfer_id(bridge_transfer_id: &vector<u8>) {
-    assert!(vector::length(bridge_transfer_id) == 32, EINVALID_BRIDGE_TRANSFER_ID);
-}
-
- - - -
- - - -## Function `create_hashlock` - -Creates a hash lock from a pre-image. - -@param pre_image The pre-image to hash. -@return The generated hash lock. - - -
public(friend) fun create_hashlock(pre_image: vector<u8>): vector<u8>
-
- - - -
-Implementation - - -
public(friend) fun create_hashlock(pre_image: vector<u8>) : vector<u8> {
-    assert!(vector::length(&pre_image) > 0, EINVALID_PRE_IMAGE);
-    keccak256(pre_image)
-}
-
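To make the lock/reveal relationship concrete: the 32-byte value returned by `create_hashlock` is what `create_details` stores as the `hash_lock`, and completing a transfer later requires a pre-image that hashes back to exactly that value. A minimal sketch as a hypothetical test-only module with illustrative values (module name and values are not part of the deleted sources):

```move
#[test_only]
module 0x1::hash_lock_example {
    use 0x1::aptos_hash;
    use 0x1::vector;

    #[test]
    fun pre_image_reproduces_hash_lock() {
        // Illustrative pre-image; create_hashlock only requires it to be non-empty.
        let pre_image = b"illustrative secret";
        // The lock stored with the transfer details is keccak256 of the pre-image...
        let hash_lock = aptos_hash::keccak256(copy pre_image);
        // ...so it is always 32 bytes, as assert_valid_hash_lock expects...
        assert!(vector::length(&hash_lock) == 32, 0);
        // ...and hashing the revealed pre-image again reproduces it exactly.
        assert!(aptos_hash::keccak256(pre_image) == hash_lock, 1);
    }
}
```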
- - - -
- - - -## Function `assert_correct_hash_lock` - -Asserts that the hash lock matches the expected value. - -@param details The bridge transfer details. -@param hash_lock The hash lock to compare. -@abort If the hash lock is incorrect. - - -
fun assert_correct_hash_lock<Initiator: store, Recipient: store>(details: &atomic_bridge_store::BridgeTransferDetails<Initiator, Recipient>, hash_lock: vector<u8>)
-
- - - -
-Implementation - - -
fun assert_correct_hash_lock<Initiator: store, Recipient: store>(details: &BridgeTransferDetails<Initiator, Recipient>, hash_lock: vector<u8>) {
-    assert!(&hash_lock == &details.hash_lock, EINVALID_PRE_IMAGE);
-}
-
- - - -
- - - -## Function `assert_timed_out_lock` - -Asserts that the time lock has expired. - -@param details The bridge transfer details. -@abort If the time lock has not expired. - - -
fun assert_timed_out_lock<Initiator: store, Recipient: store>(details: &atomic_bridge_store::BridgeTransferDetails<Initiator, Recipient>)
-
- - - -
-Implementation - - -
fun assert_timed_out_lock<Initiator: store, Recipient: store>(details: &BridgeTransferDetails<Initiator, Recipient>) {
-    assert!(now() > details.time_lock, ENOT_EXPIRED);
-}
-
- - - -
- - - -## Function `assert_within_timelock` - -Asserts we are still within the timelock. - -@param details The bridge transfer details. -@abort If the time lock has expired. - - -
fun assert_within_timelock<Initiator: store, Recipient: store>(details: &atomic_bridge_store::BridgeTransferDetails<Initiator, Recipient>)
-
- - - -
-Implementation - - -
fun assert_within_timelock<Initiator: store, Recipient: store>(details: &BridgeTransferDetails<Initiator, Recipient>) {
-    assert!(!(now() > details.time_lock), EEXPIRED);
-}
-
- - - -
- - - -## Function `complete` - -Completes the bridge transfer. - -@param details The bridge transfer details to complete. - - -
fun complete<Initiator: store, Recipient: store>(details: &mut atomic_bridge_store::BridgeTransferDetails<Initiator, Recipient>)
-
- - - -
-Implementation - - -
fun complete<Initiator: store, Recipient: store>(details: &mut BridgeTransferDetails<Initiator, Recipient>) {
-    details.state = COMPLETED_TRANSACTION;
-}
-
- - - -
- - - -## Function `cancel` - -Cancels the bridge transfer. - -@param details The bridge transfer details to cancel. - - -
fun cancel<Initiator: store, Recipient: store>(details: &mut atomic_bridge_store::BridgeTransferDetails<Initiator, Recipient>)
-
- - - -
-Implementation - - -
fun cancel<Initiator: store, Recipient: store>(details: &mut BridgeTransferDetails<Initiator, Recipient>) {
-    details.state = CANCELLED_TRANSACTION;
-}
-
- - - -
- - - -## Function `complete_details` - -Validates and completes a bridge transfer by confirming the hash lock and state. - -@param hash_lock The hash lock used to validate the transfer. -@param details The mutable reference to the bridge transfer details to be completed. -@return A tuple containing the recipient and the amount of the transfer. -@abort If the hash lock is invalid, the transfer is not pending, or the hash lock does not match. - - -
fun complete_details<Initiator: store, Recipient: copy, store>(hash_lock: vector<u8>, details: &mut atomic_bridge_store::BridgeTransferDetails<Initiator, Recipient>): (Recipient, u64)
-
- - - -
-Implementation - - -
fun complete_details<Initiator: store, Recipient: store + copy>(hash_lock: vector<u8>, details: &mut BridgeTransferDetails<Initiator, Recipient>) : (Recipient, u64) {
-    assert_valid_hash_lock(&hash_lock);
-    assert_pending(details);
-    assert_correct_hash_lock(details, hash_lock);
-    assert_within_timelock(details);
-
-    complete(details);
-
-    (details.addresses.recipient, details.amount)
-}
-
- - - -
- - - -## Function `complete_transfer` - -Completes a bridge transfer by validating the hash lock and updating the transfer state. - -@param bridge_transfer_id The ID of the bridge transfer to complete. -@param hash_lock The hash lock used to validate the transfer. -@return A tuple containing the recipient of the transfer and the amount transferred. -@abort If the bridge transfer details are not found or if the completion checks in complete_details fail. - - -
public(friend) fun complete_transfer<Initiator: store, Recipient: copy, store>(bridge_transfer_id: vector<u8>, hash_lock: vector<u8>): (Recipient, u64)
-
- - - -
-Implementation - - -
public(friend) fun complete_transfer<Initiator: store, Recipient: copy + store>(bridge_transfer_id: vector<u8>, hash_lock: vector<u8>) : (Recipient, u64) acquires SmartTableWrapper {
-    assert!(features::abort_atomic_bridge_enabled(), EATOMIC_BRIDGE_NOT_ENABLED);
-
-    let table = borrow_global_mut<SmartTableWrapper<vector<u8>, BridgeTransferDetails<Initiator, Recipient>>>(@aptos_framework);
-
-    let details = smart_table::borrow_mut(
-        &mut table.inner,
-        bridge_transfer_id);
-
-    complete_details<Initiator, Recipient>(hash_lock, details)
-}
-
- - - -
- - - -## Function `cancel_details` - -Cancels a pending bridge transfer if the time lock has expired. - -@param details A mutable reference to the bridge transfer details to be canceled. -@return A tuple containing the initiator of the transfer and the amount to be refunded. -@abort If the transfer is not in a pending state or the time lock has not expired. - - -
fun cancel_details<Initiator: copy, store, Recipient: store>(details: &mut atomic_bridge_store::BridgeTransferDetails<Initiator, Recipient>): (Initiator, u64)
-
- - - -
-Implementation - - -
fun cancel_details<Initiator: store + copy, Recipient: store>(details: &mut BridgeTransferDetails<Initiator, Recipient>) : (Initiator, u64) {
-    assert_pending(details);
-    assert_timed_out_lock(details);
-
-    cancel(details);
-
-    (details.addresses.initiator, details.amount)
-}
-
- - - -
- - - -## Function `cancel_transfer` - -Cancels a bridge transfer if it is pending and the time lock has expired. - -@param bridge_transfer_id The ID of the bridge transfer to cancel. -@return A tuple containing the initiator of the transfer and the amount to be refunded. -@abort If the bridge transfer details are not found or if the cancellation conditions in cancel_details fail. - - -
public(friend) fun cancel_transfer<Initiator: copy, store, Recipient: store>(bridge_transfer_id: vector<u8>): (Initiator, u64)
-
- - - -
-Implementation - - -
public(friend) fun cancel_transfer<Initiator: store + copy, Recipient: store>(bridge_transfer_id: vector<u8>) : (Initiator, u64) acquires SmartTableWrapper {
-    assert!(features::abort_atomic_bridge_enabled(), EATOMIC_BRIDGE_NOT_ENABLED);
-
-    let table = borrow_global_mut<SmartTableWrapper<vector<u8>, BridgeTransferDetails<Initiator, Recipient>>>(@aptos_framework);
-
-    let details = smart_table::borrow_mut(
-        &mut table.inner,
-        bridge_transfer_id);
-
-    cancel_details<Initiator, Recipient>(details)
-}
-
- - - -
- - - -## Function `bridge_transfer_id` - -Generates a unique bridge transfer ID based on transfer details and nonce. - -@param details The bridge transfer details. -@return The generated bridge transfer ID. - - -
public(friend) fun bridge_transfer_id<Initiator: store, Recipient: store>(details: &atomic_bridge_store::BridgeTransferDetails<Initiator, Recipient>): vector<u8>
-
- - - -
-Implementation - - -
public(friend) fun bridge_transfer_id<Initiator: store, Recipient: store>(details: &BridgeTransferDetails<Initiator, Recipient>) : vector<u8> acquires Nonce {
-    let nonce = borrow_global_mut<Nonce>(@aptos_framework);
-    let combined_bytes = vector::empty<u8>();
-    vector::append(&mut combined_bytes, bcs::to_bytes(&details.addresses.initiator));
-    vector::append(&mut combined_bytes, bcs::to_bytes(&details.addresses.recipient));
-    vector::append(&mut combined_bytes, details.hash_lock);
-    if (nonce.inner == MAX_U64) {
-        nonce.inner = 0;  // Wrap around to 0 if at maximum value
-    } else {
-        nonce.inner = nonce.inner + 1;  // Safe to increment without overflow
-    };
-    vector::append(&mut combined_bytes, bcs::to_bytes(&nonce.inner));
-
-    keccak256(combined_bytes)
-}
-
- - - -
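For reference, the ID computed above is keccak256 over the BCS encoding of both addresses, followed by the hash lock and the updated nonce. A minimal sketch of the same derivation as a hypothetical test-only module (the recipient is shown as raw bytes rather than an `EthereumAddress`; illustrative only, not part of the deleted sources):

```move
#[test_only]
module 0x1::bridge_transfer_id_example {
    use 0x1::aptos_hash;
    use 0x1::bcs;
    use 0x1::vector;

    // Mirrors bridge_transfer_id:
    // keccak256(bcs(initiator) || bcs(recipient) || hash_lock || bcs(nonce)).
    public fun derive_id(
        initiator: address,
        recipient: vector<u8>,
        hash_lock: vector<u8>,
        nonce: u64,
    ): vector<u8> {
        let combined_bytes = vector::empty<u8>();
        vector::append(&mut combined_bytes, bcs::to_bytes(&initiator));
        vector::append(&mut combined_bytes, bcs::to_bytes(&recipient));
        vector::append(&mut combined_bytes, hash_lock);
        vector::append(&mut combined_bytes, bcs::to_bytes(&nonce));
        aptos_hash::keccak256(combined_bytes)
    }
}
```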
- - - -## Function `get_bridge_transfer_details_initiator` - -Gets initiator bridge transfer details given a bridge transfer ID - -@param bridge_transfer_id A 32-byte vector of unsigned 8-bit integers. -@return A BridgeTransferDetails struct. -@abort If there is no transfer in the atomic bridge store. - - -
#[view]
-public fun get_bridge_transfer_details_initiator(bridge_transfer_id: vector<u8>): atomic_bridge_store::BridgeTransferDetails<address, ethereum::EthereumAddress>
-
- - - -
-Implementation - - -
public fun get_bridge_transfer_details_initiator(
-    bridge_transfer_id: vector<u8>
-): BridgeTransferDetails<address, EthereumAddress> acquires SmartTableWrapper {
-    get_bridge_transfer_details(bridge_transfer_id)
-}
-
- - - -
- - - -## Function `get_bridge_transfer_details_counterparty` - -Gets counterparty bridge transfer details given a bridge transfer ID - -@param bridge_transfer_id A 32-byte vector of unsigned 8-bit integers. -@return A BridgeTransferDetails struct. -@abort If there is no transfer in the atomic bridge store. - - -
#[view]
-public fun get_bridge_transfer_details_counterparty(bridge_transfer_id: vector<u8>): atomic_bridge_store::BridgeTransferDetails<ethereum::EthereumAddress, address>
-
- - - -
-Implementation - - -
public fun get_bridge_transfer_details_counterparty(
-    bridge_transfer_id: vector<u8>
-): BridgeTransferDetails<EthereumAddress, address> acquires SmartTableWrapper {
-    get_bridge_transfer_details(bridge_transfer_id)
-}
-
- - - -
- - - -## Function `get_bridge_transfer_details` - - - -
fun get_bridge_transfer_details<Initiator: copy, store, Recipient: copy, store>(bridge_transfer_id: vector<u8>): atomic_bridge_store::BridgeTransferDetails<Initiator, Recipient>
-
- - - -
-Implementation - - -
fun get_bridge_transfer_details<Initiator: store + copy, Recipient: store + copy>(bridge_transfer_id: vector<u8>
-): BridgeTransferDetails<Initiator, Recipient> acquires SmartTableWrapper {
-    let table = borrow_global<SmartTableWrapper<vector<u8>, BridgeTransferDetails<Initiator, Recipient>>>(@aptos_framework);
-
-    let details_ref = smart_table::borrow(
-        &table.inner,
-        bridge_transfer_id
-    );
-
-    *details_ref
-}
-
- - - -
- - - -## Specification - - - - -### Function `initialize` - - -
public fun initialize(aptos_framework: &signer)
-
- - - - -
let addr = signer::address_of(aptos_framework);
-ensures exists<Nonce>(addr);
-ensures exists<SmartTableWrapper<vector<u8>, BridgeTransferDetails<address, EthereumAddress>>>(addr);
-ensures exists<SmartTableWrapper<vector<u8>, BridgeTransferDetails<EthereumAddress, address>>>(addr);
-
- - - - - - - -
schema TimeLockAbortsIf {
-    time_lock: u64;
-    aborts_if time_lock < MIN_TIME_LOCK;
-    aborts_if !exists<CurrentTimeMicroseconds>(@aptos_framework);
-    aborts_if time_lock > MAX_U64 - timestamp::spec_now_seconds();
-}
-
- - - - - -### Function `create_time_lock` - - -
public(friend) fun create_time_lock(time_lock: u64): u64
-
- - - - -
include TimeLockAbortsIf;
-ensures result == timestamp::spec_now_seconds() + time_lock;
-
- - -If the sum of now() and lock does not overflow, the result is the sum of now() and lock. - - -
ensures (timestamp::spec_now_seconds() + time_lock <= 0xFFFFFFFFFFFFFFFF) ==> result == timestamp::spec_now_seconds() + time_lock;
-
- - - - - -### Function `create_details` - - -
public(friend) fun create_details<Initiator: store, Recipient: store>(initiator: Initiator, recipient: Recipient, amount: u64, hash_lock: vector<u8>, time_lock: u64): atomic_bridge_store::BridgeTransferDetails<Initiator, Recipient>
-
- - - - -
include TimeLockAbortsIf;
-aborts_if amount == 0;
-aborts_if len(hash_lock) != 32;
-ensures result == BridgeTransferDetails<Initiator, Recipient> {
-        addresses: AddressPair<Initiator, Recipient> {
-        initiator,
-        recipient
-    },
-    amount,
-    hash_lock,
-    time_lock: timestamp::spec_now_seconds() + time_lock,
-    state: PENDING_TRANSACTION,
-};
-
- - - - - - - -
schema AddAbortsIf<T> {
-    bridge_transfer_id: vector<u8>;
-    table: SmartTable<vector<u8>, T>;
-    aborts_if len(bridge_transfer_id) != 32;
-    aborts_if smart_table::spec_contains(table, bridge_transfer_id);
-    aborts_if !features::spec_is_enabled(features::ATOMIC_BRIDGE);
-}
-
- - - - - -### Function `add` - - -
public(friend) fun add<Initiator: store, Recipient: store>(bridge_transfer_id: vector<u8>, details: atomic_bridge_store::BridgeTransferDetails<Initiator, Recipient>)
-
- - - - -
let table = global<SmartTableWrapper<vector<u8>, BridgeTransferDetails<Initiator, Recipient>>>(@aptos_framework).inner;
-include AddAbortsIf<BridgeTransferDetails<Initiator, Recipient>>;
-aborts_if !exists<SmartTableWrapper<vector<u8>, BridgeTransferDetails<Initiator, Recipient>>>(@aptos_framework);
-aborts_if smart_table::spec_contains(table, bridge_transfer_id);
-ensures smart_table::spec_contains(global<SmartTableWrapper<vector<u8>, BridgeTransferDetails<Initiator, Recipient>>>(@aptos_framework).inner, bridge_transfer_id);
-ensures smart_table::spec_len(global<SmartTableWrapper<vector<u8>, BridgeTransferDetails<Initiator, Recipient>>>(@aptos_framework).inner) ==
-    old(smart_table::spec_len(global<SmartTableWrapper<vector<u8>, BridgeTransferDetails<Initiator, Recipient>>>(@aptos_framework).inner)) + 1;
-
- - - - - - - -
schema HashLockAbortsIf {
-    hash_lock: vector<u8>;
-    aborts_if len(hash_lock) != 32;
-}
-
- - - - - - - -
schema BridgetTransferDetailsAbortsIf<Initiator, Recipient> {
-    hash_lock: vector<u8>;
-    details: BridgeTransferDetails<Initiator, Recipient>;
-    include HashLockAbortsIf;
-    aborts_if timestamp::spec_now_seconds() > details.time_lock;
-    aborts_if !exists<CurrentTimeMicroseconds>(@aptos_framework);
-    aborts_if details.state != PENDING_TRANSACTION;
-    aborts_if details.hash_lock != hash_lock;
-}
-
- - - - - -### Function `create_hashlock` - - -
public(friend) fun create_hashlock(pre_image: vector<u8>): vector<u8>
-
- - - - -
aborts_if len(pre_image) == 0;
-
- - - - - -### Function `complete` - - -
fun complete<Initiator: store, Recipient: store>(details: &mut atomic_bridge_store::BridgeTransferDetails<Initiator, Recipient>)
-
- - - - -
requires details.state == PENDING_TRANSACTION;
-ensures details.state == COMPLETED_TRANSACTION;
-
- - - - - -### Function `cancel` - - -
fun cancel<Initiator: store, Recipient: store>(details: &mut atomic_bridge_store::BridgeTransferDetails<Initiator, Recipient>)
-
- - - - -
requires details.state == PENDING_TRANSACTION;
-ensures details.state == CANCELLED_TRANSACTION;
-
- - - - - -### Function `complete_details` - - -
fun complete_details<Initiator: store, Recipient: copy, store>(hash_lock: vector<u8>, details: &mut atomic_bridge_store::BridgeTransferDetails<Initiator, Recipient>): (Recipient, u64)
-
- - - - -
include BridgetTransferDetailsAbortsIf<Initiator, Recipient>;
-
- - - - - -### Function `complete_transfer` - - -
public(friend) fun complete_transfer<Initiator: store, Recipient: copy, store>(bridge_transfer_id: vector<u8>, hash_lock: vector<u8>): (Recipient, u64)
-
- - - - -
let table = global<SmartTableWrapper<vector<u8>, BridgeTransferDetails<Initiator, Recipient>>>(@aptos_framework).inner;
-aborts_if !features::spec_is_enabled(features::ATOMIC_BRIDGE);
-aborts_if !exists<SmartTableWrapper<vector<u8>, BridgeTransferDetails<Initiator, Recipient>>>(@aptos_framework);
-aborts_if !smart_table::spec_contains(table, bridge_transfer_id);
-let details = smart_table::spec_get(table, bridge_transfer_id);
-include BridgetTransferDetailsAbortsIf<Initiator, Recipient>;
-
- - - - - - - -
schema AbortBridgetTransferDetailsAbortsIf<Initiator, Recipient> {
-    details: BridgeTransferDetails<Initiator, Recipient>;
-    aborts_if details.state != PENDING_TRANSACTION;
-    aborts_if !(timestamp::spec_now_seconds() > details.time_lock);
-    aborts_if !exists<CurrentTimeMicroseconds>(@aptos_framework);
-    ensures details.state == CANCELLED_TRANSACTION;
-}
-
- - - - - -### Function `cancel_details` - - -
fun cancel_details<Initiator: copy, store, Recipient: store>(details: &mut atomic_bridge_store::BridgeTransferDetails<Initiator, Recipient>): (Initiator, u64)
-
- - - - -
include AbortBridgetTransferDetailsAbortsIf<Initiator, Recipient>;
-
-
-
-
-
-# Module `0x1::atomic_bridge_configuration`
-
-
-- [Resource `BridgeConfig`](#0x1_atomic_bridge_configuration_BridgeConfig)
-- [Struct `BridgeConfigOperatorUpdated`](#0x1_atomic_bridge_configuration_BridgeConfigOperatorUpdated)
-- [Struct `InitiatorTimeLockUpdated`](#0x1_atomic_bridge_configuration_InitiatorTimeLockUpdated)
-- [Struct `CounterpartyTimeLockUpdated`](#0x1_atomic_bridge_configuration_CounterpartyTimeLockUpdated)
-- [Constants](#@Constants_0)
-- [Function `initialize`](#0x1_atomic_bridge_configuration_initialize)
-- [Function `update_bridge_operator`](#0x1_atomic_bridge_configuration_update_bridge_operator)
-- [Function `set_initiator_time_lock_duration`](#0x1_atomic_bridge_configuration_set_initiator_time_lock_duration)
-- [Function `set_counterparty_time_lock_duration`](#0x1_atomic_bridge_configuration_set_counterparty_time_lock_duration)
-- [Function `initiator_timelock_duration`](#0x1_atomic_bridge_configuration_initiator_timelock_duration)
-- [Function `counterparty_timelock_duration`](#0x1_atomic_bridge_configuration_counterparty_timelock_duration)
-- [Function `bridge_operator`](#0x1_atomic_bridge_configuration_bridge_operator)
-- [Function `assert_is_caller_operator`](#0x1_atomic_bridge_configuration_assert_is_caller_operator)
-- [Specification](#@Specification_1)
-    - [Function `initialize`](#@Specification_1_initialize)
-    - [Function `update_bridge_operator`](#@Specification_1_update_bridge_operator)
-
-
use 0x1::event;
-use 0x1::signer;
-use 0x1::system_addresses;
-
- - - - - -## Resource `BridgeConfig` - - - -
struct BridgeConfig has key
-
- - - -
-Fields - - -
-
-bridge_operator: address -
-
- -
-
-initiator_time_lock: u64 -
-
- -
-
-counterparty_time_lock: u64 -
-
- -
-
- - -
- - - -## Struct `BridgeConfigOperatorUpdated` - -Event emitted when the bridge operator is updated. - - -
#[event]
-struct BridgeConfigOperatorUpdated has drop, store
-
- - - -
-Fields - - -
-
-old_operator: address -
-
- -
-
-new_operator: address -
-
- -
-
- - -
- - - -## Struct `InitiatorTimeLockUpdated` - -Event emitted when the initiator time lock has been updated. - - -
#[event]
-struct InitiatorTimeLockUpdated has drop, store
-
- - - -
-Fields - - -
-
-time_lock: u64 -
-
- -
-
- - -
- - - -## Struct `CounterpartyTimeLockUpdated` - -Event emitted when the initiator time lock has been updated. - - -
#[event]
-struct CounterpartyTimeLockUpdated has drop, store
-
- - - -
-Fields - - -
-
-time_lock: u64 -
-
- -
-
- - -
- - - -## Constants - - - - -Counterparty time lock duration is 24 hours in seconds - - -
const COUNTERPARTY_TIME_LOCK_DUARTION: u64 = 86400;
-
- - - - - -Error code for invalid bridge operator - - -
const EINVALID_BRIDGE_OPERATOR: u64 = 1;
-
- - - - - -Initiator time lock duration is 48 hours in seconds - - -
const INITIATOR_TIME_LOCK_DUARTION: u64 = 172800;
-
- - - - - -## Function `initialize` - -Initializes the bridge configuration with Aptos framework as the bridge operator. - -@param aptos_framework The signer representing the Aptos framework. - - -
public fun initialize(aptos_framework: &signer)
-
- - - -
-Implementation - - -
public fun initialize(aptos_framework: &signer) {
-    system_addresses::assert_aptos_framework(aptos_framework);
-    let bridge_config = BridgeConfig {
-        bridge_operator: signer::address_of(aptos_framework),
-        initiator_time_lock: INITIATOR_TIME_LOCK_DUARTION,
-        counterparty_time_lock: COUNTERPARTY_TIME_LOCK_DUARTION,
-    };
-    move_to(aptos_framework, bridge_config);
-}
-
- - - -
- - - -## Function `update_bridge_operator` - -Updates the bridge operator, requiring governance validation. - -@param aptos_framework The signer representing the Aptos framework. -@param new_operator The new address to be set as the bridge operator. -@abort If the current operator is the same as the new operator. - - -
public fun update_bridge_operator(aptos_framework: &signer, new_operator: address)
-
- - - -
-Implementation - - -
public fun update_bridge_operator(aptos_framework: &signer, new_operator: address
-)   acquires BridgeConfig {
-    system_addresses::assert_aptos_framework(aptos_framework);
-    let bridge_config = borrow_global_mut<BridgeConfig>(@aptos_framework);
-    let old_operator = bridge_config.bridge_operator;
-    assert!(old_operator != new_operator, EINVALID_BRIDGE_OPERATOR);
-
-    bridge_config.bridge_operator = new_operator;
-
-    event::emit(
-        BridgeConfigOperatorUpdated {
-            old_operator,
-            new_operator,
-        },
-    );
-}
-
- - - -
- - - -## Function `set_initiator_time_lock_duration` - - - -
public fun set_initiator_time_lock_duration(aptos_framework: &signer, time_lock: u64)
-
- - - -
-Implementation - - -
public fun set_initiator_time_lock_duration(aptos_framework: &signer, time_lock: u64
-) acquires BridgeConfig {
-    system_addresses::assert_aptos_framework(aptos_framework);
-    borrow_global_mut<BridgeConfig>(@aptos_framework).initiator_time_lock = time_lock;
-
-    event::emit(
-        InitiatorTimeLockUpdated {
-            time_lock
-        },
-    );
-}
-
- - - -
- - - -## Function `set_counterparty_time_lock_duration` - - - -
public fun set_counterparty_time_lock_duration(aptos_framework: &signer, time_lock: u64)
-
- - - -
-Implementation - - -
public fun set_counterparty_time_lock_duration(aptos_framework: &signer, time_lock: u64
-) acquires BridgeConfig {
-    system_addresses::assert_aptos_framework(aptos_framework);
-    borrow_global_mut<BridgeConfig>(@aptos_framework).counterparty_time_lock = time_lock;
-
-    event::emit(
-        CounterpartyTimeLockUpdated {
-            time_lock
-        },
-    );
-}
-
- - - -
- - - -## Function `initiator_timelock_duration` - - - -
#[view]
-public fun initiator_timelock_duration(): u64
-
- - - -
-Implementation - - -
public fun initiator_timelock_duration() : u64 acquires BridgeConfig {
-    borrow_global<BridgeConfig>(@aptos_framework).initiator_time_lock
-}
-
- - - -
- - - -## Function `counterparty_timelock_duration` - - - -
#[view]
-public fun counterparty_timelock_duration(): u64
-
- - - -
-Implementation - - -
public fun counterparty_timelock_duration() : u64 acquires BridgeConfig {
-    borrow_global<BridgeConfig>(@aptos_framework).counterparty_time_lock
-}
-
- - - -
- - - -## Function `bridge_operator` - -Retrieves the address of the current bridge operator. - -@return The address of the current bridge operator. - - -
#[view]
-public fun bridge_operator(): address
-
- - - -
-Implementation - - -
public fun bridge_operator(): address acquires BridgeConfig {
-    borrow_global_mut<BridgeConfig>(@aptos_framework).bridge_operator
-}
-
- - - -
- - - -## Function `assert_is_caller_operator` - -Asserts that the caller is the current bridge operator. - -@param caller The signer whose authority is being checked. -@abort If the caller is not the current bridge operator. - - -
public(friend) fun assert_is_caller_operator(caller: &signer)
-
- - - -
-Implementation - - -
public(friend) fun assert_is_caller_operator(caller: &signer
-) acquires BridgeConfig {
-    assert!(borrow_global<BridgeConfig>(@aptos_framework).bridge_operator == signer::address_of(caller), EINVALID_BRIDGE_OPERATOR);
-}
-
- - - -
- - - -## Specification - - - - -### Function `initialize` - - -
public fun initialize(aptos_framework: &signer)
-
- - - - -
aborts_if !system_addresses::is_aptos_framework_address(signer::address_of(aptos_framework));
-aborts_if exists<BridgeConfig>(signer::address_of(aptos_framework));
-ensures global<BridgeConfig>(signer::address_of(aptos_framework)).bridge_operator == signer::address_of(aptos_framework);
-
- - - - - -### Function `update_bridge_operator` - - -
public fun update_bridge_operator(aptos_framework: &signer, new_operator: address)
-
- - - - -
aborts_if !system_addresses::is_aptos_framework_address(signer::address_of(aptos_framework));
-aborts_if !exists<BridgeConfig>(signer::address_of(aptos_framework));
-aborts_if global<BridgeConfig>(signer::address_of(aptos_framework)).bridge_operator == new_operator;
-ensures global<BridgeConfig>(signer::address_of(aptos_framework)).bridge_operator == new_operator;
-
-
-
-
-
-
-# Module `0x1::atomic_bridge`
-
-
-- [Resource `AptosCoinBurnCapability`](#0x1_atomic_bridge_AptosCoinBurnCapability)
-- [Resource `AptosCoinMintCapability`](#0x1_atomic_bridge_AptosCoinMintCapability)
-- [Resource `AptosFABurnCapabilities`](#0x1_atomic_bridge_AptosFABurnCapabilities)
-- [Resource `AptosFAMintCapabilities`](#0x1_atomic_bridge_AptosFAMintCapabilities)
-- [Constants](#@Constants_0)
-- [Function `initialize`](#0x1_atomic_bridge_initialize)
-- [Function `store_aptos_coin_burn_cap`](#0x1_atomic_bridge_store_aptos_coin_burn_cap)
-- [Function `store_aptos_coin_mint_cap`](#0x1_atomic_bridge_store_aptos_coin_mint_cap)
-- [Function `mint`](#0x1_atomic_bridge_mint)
-- [Function `burn`](#0x1_atomic_bridge_burn)
-
-
use 0x1::aptos_coin;
-use 0x1::atomic_bridge_configuration;
-use 0x1::atomic_bridge_store;
-use 0x1::coin;
-use 0x1::features;
-use 0x1::fungible_asset;
-use 0x1::system_addresses;
-
- - - - - -## Resource `AptosCoinBurnCapability` - - - -
struct AptosCoinBurnCapability has key
-
- - - -
-Fields - - -
-
-burn_cap: coin::BurnCapability<aptos_coin::AptosCoin> -
-
- -
-
- - -
- - - -## Resource `AptosCoinMintCapability` - - - -
struct AptosCoinMintCapability has key
-
- - - -
-Fields - - -
-
-mint_cap: coin::MintCapability<aptos_coin::AptosCoin> -
-
- -
-
- - -
- - - -## Resource `AptosFABurnCapabilities` - - - -
struct AptosFABurnCapabilities has key
-
- - - -
-Fields - - -
-
-burn_ref: fungible_asset::BurnRef -
-
- -
-
- - -
- - - -## Resource `AptosFAMintCapabilities` - - - -
struct AptosFAMintCapabilities has key
-
- - - -
-Fields - - -
-
-burn_ref: fungible_asset::MintRef -
-
- -
-
- - -
- - - -## Constants - - - - - - -
const EATOMIC_BRIDGE_NOT_ENABLED: u64 = 1;
-
- - - - - -## Function `initialize` - -Initializes the atomic bridge by setting up necessary configurations. - -@param aptos_framework The signer representing the Aptos framework. - - -
public fun initialize(aptos_framework: &signer)
-
- - - -
-Implementation - - -
public fun initialize(aptos_framework: &signer) {
-    atomic_bridge_configuration::initialize(aptos_framework);
-    atomic_bridge_store::initialize(aptos_framework);
-}
-
- - - -
- - - -## Function `store_aptos_coin_burn_cap` - -Stores the burn capability for AptosCoin, converting to a fungible asset reference if the feature is enabled. - -@param aptos_framework The signer representing the Aptos framework. -@param burn_cap The burn capability for AptosCoin. - - -
public fun store_aptos_coin_burn_cap(aptos_framework: &signer, burn_cap: coin::BurnCapability<aptos_coin::AptosCoin>)
-
- - - -
-Implementation - - -
public fun store_aptos_coin_burn_cap(aptos_framework: &signer, burn_cap: BurnCapability<AptosCoin>) {
-    system_addresses::assert_aptos_framework(aptos_framework);
-    if (features::operations_default_to_fa_apt_store_enabled()) {
-        let burn_ref = coin::convert_and_take_paired_burn_ref(burn_cap);
-        move_to(aptos_framework, AptosFABurnCapabilities { burn_ref });
-    } else {
-        move_to(aptos_framework, AptosCoinBurnCapability { burn_cap })
-    }
-}
-
- - - -
- - - -## Function `store_aptos_coin_mint_cap` - -Stores the mint capability for AptosCoin. - -@param aptos_framework The signer representing the Aptos framework. -@param mint_cap The mint capability for AptosCoin. - - -
public fun store_aptos_coin_mint_cap(aptos_framework: &signer, mint_cap: coin::MintCapability<aptos_coin::AptosCoin>)
-
- - - -
-Implementation - - -
public fun store_aptos_coin_mint_cap(aptos_framework: &signer, mint_cap: MintCapability<AptosCoin>) {
-    system_addresses::assert_aptos_framework(aptos_framework);
-    move_to(aptos_framework, AptosCoinMintCapability { mint_cap })
-}
-
- - - -
- - - -## Function `mint` - -Mints a specified amount of AptosCoin to a recipient's address. - -@param recipient The address of the recipient to mint coins to. -@param amount The amount of AptosCoin to mint. -@abort If the mint capability is not available. - - -
public(friend) fun mint(recipient: address, amount: u64)
-
- - - -
-Implementation - - -
public(friend) fun mint(recipient: address, amount: u64) acquires AptosCoinMintCapability {
-    assert!(features::abort_atomic_bridge_enabled(), EATOMIC_BRIDGE_NOT_ENABLED);
-
-    coin::deposit(recipient, coin::mint(
-        amount,
-        &borrow_global<AptosCoinMintCapability>(@aptos_framework).mint_cap
-    ));
-}
-
- - - -
- - - -## Function `burn` - -Burns a specified amount of AptosCoin from an address. - -@param from The address from which to burn AptosCoin. -@param amount The amount of AptosCoin to burn. -@abort If the burn capability is not available. - - -
public(friend) fun burn(from: address, amount: u64)
-
- - - -
-Implementation - - -
public(friend) fun burn(from: address, amount: u64) acquires AptosCoinBurnCapability {
-    assert!(features::abort_atomic_bridge_enabled(), EATOMIC_BRIDGE_NOT_ENABLED);
-
-    coin::burn_from(
-        from,
-        amount,
-        &borrow_global<AptosCoinBurnCapability>(@aptos_framework).burn_cap,
-    );
-}
-
- - - -
-
-
-
-
-
-# Module `0x1::atomic_bridge_counterparty`
-
-
-- [Struct `BridgeTransferLockedEvent`](#0x1_atomic_bridge_counterparty_BridgeTransferLockedEvent)
-- [Struct `BridgeTransferCompletedEvent`](#0x1_atomic_bridge_counterparty_BridgeTransferCompletedEvent)
-- [Struct `BridgeTransferCancelledEvent`](#0x1_atomic_bridge_counterparty_BridgeTransferCancelledEvent)
-- [Resource `BridgeCounterpartyEvents`](#0x1_atomic_bridge_counterparty_BridgeCounterpartyEvents)
-- [Function `initialize`](#0x1_atomic_bridge_counterparty_initialize)
-- [Function `lock_bridge_transfer_assets`](#0x1_atomic_bridge_counterparty_lock_bridge_transfer_assets)
-- [Function `complete_bridge_transfer`](#0x1_atomic_bridge_counterparty_complete_bridge_transfer)
-- [Function `abort_bridge_transfer`](#0x1_atomic_bridge_counterparty_abort_bridge_transfer)
-
-
use 0x1::account;
-use 0x1::atomic_bridge;
-use 0x1::atomic_bridge_configuration;
-use 0x1::atomic_bridge_store;
-use 0x1::ethereum;
-use 0x1::event;
-
- - - - - -## Struct `BridgeTransferLockedEvent` - -An event triggered upon locking assets for a bridge transfer - - -
#[event]
-struct BridgeTransferLockedEvent has drop, store
-
- - - -
-Fields - - -
-
-bridge_transfer_id: vector<u8> -
-
- -
-
-initiator: vector<u8> -
-
- -
-
-recipient: address -
-
- -
-
-amount: u64 -
-
- -
-
-hash_lock: vector<u8> -
-
- -
-
-time_lock: u64 -
-
- -
-
- - -
- - - -## Struct `BridgeTransferCompletedEvent` - -An event triggered upon completing a bridge transfer - - -
#[event]
-struct BridgeTransferCompletedEvent has drop, store
-
- - - -
-Fields - - -
-
-bridge_transfer_id: vector<u8> -
-
- -
-
-pre_image: vector<u8> -
-
- -
-
- - -
- - - -## Struct `BridgeTransferCancelledEvent` - -An event triggered upon cancelling a bridge transfer - - -
#[event]
-struct BridgeTransferCancelledEvent has drop, store
-
- - - -
-Fields - - -
-
-bridge_transfer_id: vector<u8> -
-
- -
-
- - -
- - - -## Resource `BridgeCounterpartyEvents` - -This struct will store the event handles for bridge events. - - -
struct BridgeCounterpartyEvents has store, key
-
- - - -
-Fields - - -
-
-bridge_transfer_locked_events: event::EventHandle<atomic_bridge_counterparty::BridgeTransferLockedEvent> -
-
- -
-
-bridge_transfer_completed_events: event::EventHandle<atomic_bridge_counterparty::BridgeTransferCompletedEvent> -
-
- -
-
-bridge_transfer_cancelled_events: event::EventHandle<atomic_bridge_counterparty::BridgeTransferCancelledEvent> -
-
- -
-
- - -
- - - -## Function `initialize` - -Initializes the module and stores the EventHandles in the resource. - - -
public fun initialize(aptos_framework: &signer)
-
- - - -
-Implementation - - -
public fun initialize(aptos_framework: &signer) {
-    move_to(aptos_framework, BridgeCounterpartyEvents {
-        bridge_transfer_locked_events: account::new_event_handle<BridgeTransferLockedEvent>(aptos_framework),
-        bridge_transfer_completed_events: account::new_event_handle<BridgeTransferCompletedEvent>(aptos_framework),
-        bridge_transfer_cancelled_events: account::new_event_handle<BridgeTransferCancelledEvent>(aptos_framework),
-    });
-}
-
- - - -
- - - -## Function `lock_bridge_transfer_assets` - -Locks assets for a bridge transfer by the initiator. - -@param caller The signer representing the bridge operator. -@param initiator The initiator's Ethereum address as a vector of bytes. -@param bridge_transfer_id The unique identifier for the bridge transfer. -@param hash_lock The hash lock for securing the transfer. -@param time_lock The time lock duration for the transfer. -@param recipient The address of the recipient on the Aptos blockchain. -@param amount The amount of assets to be locked. -@abort If the caller is not the bridge operator. - - -
public entry fun lock_bridge_transfer_assets(caller: &signer, initiator: vector<u8>, bridge_transfer_id: vector<u8>, hash_lock: vector<u8>, recipient: address, amount: u64)
-
- - - -
-Implementation - - -
public entry fun lock_bridge_transfer_assets (
-    caller: &signer,
-    initiator: vector<u8>,
-    bridge_transfer_id: vector<u8>,
-    hash_lock: vector<u8>,
-    recipient: address,
-    amount: u64
-) acquires BridgeCounterpartyEvents {
-    atomic_bridge_configuration::assert_is_caller_operator(caller);
-    let ethereum_address = ethereum::ethereum_address_no_eip55(initiator);
-    let time_lock = atomic_bridge_configuration::counterparty_timelock_duration();
-    let details = atomic_bridge_store::create_details(
-        ethereum_address,
-        recipient,
-        amount,
-        hash_lock,
-        time_lock
-    );
-
-    // bridge_store::add_counterparty(bridge_transfer_id, details);
-    atomic_bridge_store::add(bridge_transfer_id, details);
-
-    let bridge_events = borrow_global_mut<BridgeCounterpartyEvents>(@aptos_framework);
-
-    event::emit_event(
-        &mut bridge_events.bridge_transfer_locked_events,
-        BridgeTransferLockedEvent {
-            bridge_transfer_id,
-            initiator,
-            recipient,
-            amount,
-            hash_lock,
-            time_lock,
-        },
-    );
-}
-
- - - -
- - - -## Function `complete_bridge_transfer` - -Completes a bridge transfer by revealing the pre-image. - -@param bridge_transfer_id The unique identifier for the bridge transfer. -@param pre_image The pre-image that matches the hash lock to complete the transfer. -@abort If the caller is not the bridge operator or the hash lock validation fails. - - -
public entry fun complete_bridge_transfer(bridge_transfer_id: vector<u8>, pre_image: vector<u8>)
-
- - - -
-Implementation - - -
public entry fun complete_bridge_transfer (
-    bridge_transfer_id: vector<u8>,
-    pre_image: vector<u8>,
-) acquires BridgeCounterpartyEvents {
-    let (recipient, amount) = atomic_bridge_store::complete_transfer<EthereumAddress, address>(
-        bridge_transfer_id,
-        create_hashlock(pre_image)
-    );
-
-    // Mint, fails silently
-    atomic_bridge::mint(recipient, amount);
-
-    let bridge_counterparty_events = borrow_global_mut<BridgeCounterpartyEvents>(@aptos_framework);
-    event::emit_event(
-        &mut bridge_counterparty_events.bridge_transfer_completed_events,
-        BridgeTransferCompletedEvent {
-            bridge_transfer_id,
-            pre_image,
-        },
-    );
-}
-
- - - -
- - - -## Function `abort_bridge_transfer` - -Aborts a bridge transfer if the time lock has expired. - -@param caller The signer representing the bridge operator. -@param bridge_transfer_id The unique identifier for the bridge transfer. -@abort If the caller is not the bridge operator or if the time lock has not expired. - - -
public entry fun abort_bridge_transfer(caller: &signer, bridge_transfer_id: vector<u8>)
-
- - - -
-Implementation - - -
public entry fun abort_bridge_transfer (
-    caller: &signer,
-    bridge_transfer_id: vector<u8>
-) acquires BridgeCounterpartyEvents {
-    atomic_bridge_configuration::assert_is_caller_operator(caller);
-
-    atomic_bridge_store::cancel_transfer<EthereumAddress, address>(bridge_transfer_id);
-
-    let bridge_counterparty_events = borrow_global_mut<BridgeCounterpartyEvents>(@aptos_framework);
-    event::emit_event(
-        &mut bridge_counterparty_events.bridge_transfer_cancelled_events,
-        BridgeTransferCancelledEvent {
-            bridge_transfer_id,
-        },
-    );
-}
-
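Taken together, the counterparty flow documented above is: the operator locks assets with `lock_bridge_transfer_assets`, anyone can then complete the transfer by revealing the pre-image behind the hash lock, and the operator can abort it once the time lock expires. A minimal sketch of the completion call as a hypothetical script with illustrative argument values (not part of the deleted sources; a real call only succeeds if the id exists and the pre-image hashes to the recorded lock):

```move
script {
    use 0x1::atomic_bridge_counterparty;

    // Anyone may submit the pre-image; no signer is required for completion.
    fun complete_counterparty_transfer_example() {
        // 32-byte bridge transfer id, illustrative value only.
        let bridge_transfer_id =
            x"0000000000000000000000000000000000000000000000000000000000000001";
        // Pre-image whose keccak256 must equal the stored hash lock.
        let pre_image = b"illustrative secret";
        atomic_bridge_counterparty::complete_bridge_transfer(bridge_transfer_id, pre_image);
    }
}
```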
- - - -
-
-
-
-
-
-# Module `0x1::atomic_bridge_initiator`
-
-
-- [Struct `BridgeTransferInitiatedEvent`](#0x1_atomic_bridge_initiator_BridgeTransferInitiatedEvent)
-- [Struct `BridgeTransferCompletedEvent`](#0x1_atomic_bridge_initiator_BridgeTransferCompletedEvent)
-- [Struct `BridgeTransferRefundedEvent`](#0x1_atomic_bridge_initiator_BridgeTransferRefundedEvent)
-- [Resource `BridgeInitiatorEvents`](#0x1_atomic_bridge_initiator_BridgeInitiatorEvents)
-- [Function `initialize`](#0x1_atomic_bridge_initiator_initialize)
-- [Function `initiate_bridge_transfer`](#0x1_atomic_bridge_initiator_initiate_bridge_transfer)
-- [Function `complete_bridge_transfer`](#0x1_atomic_bridge_initiator_complete_bridge_transfer)
-- [Function `refund_bridge_transfer`](#0x1_atomic_bridge_initiator_refund_bridge_transfer)
-
-
use 0x1::account;
-use 0x1::atomic_bridge;
-use 0x1::atomic_bridge_configuration;
-use 0x1::atomic_bridge_store;
-use 0x1::ethereum;
-use 0x1::event;
-use 0x1::signer;
-
- - - - - -## Struct `BridgeTransferInitiatedEvent` - - - -
#[event]
-struct BridgeTransferInitiatedEvent has drop, store
-
- - - -
-Fields - - -
-
-bridge_transfer_id: vector<u8> -
-
- -
-
-initiator: address -
-
- -
-
-recipient: vector<u8> -
-
- -
-
-amount: u64 -
-
- -
-
-hash_lock: vector<u8> -
-
- -
-
-time_lock: u64 -
-
- -
-
- - -
- - - -## Struct `BridgeTransferCompletedEvent` - - - -
#[event]
-struct BridgeTransferCompletedEvent has drop, store
-
- - - -
-Fields - - -
-
-bridge_transfer_id: vector<u8> -
-
- -
-
-pre_image: vector<u8> -
-
- -
-
- - -
- - - -## Struct `BridgeTransferRefundedEvent` - - - -
#[event]
-struct BridgeTransferRefundedEvent has drop, store
-
- - - -
-Fields - - -
-
-bridge_transfer_id: vector<u8> -
-
- -
-
- - -
- - - -## Resource `BridgeInitiatorEvents` - -This struct will store the event handles for bridge events. - - -
struct BridgeInitiatorEvents has store, key
-
- - - -
-Fields - - -
-
-bridge_transfer_initiated_events: event::EventHandle<atomic_bridge_initiator::BridgeTransferInitiatedEvent> -
-
- -
-
-bridge_transfer_completed_events: event::EventHandle<atomic_bridge_initiator::BridgeTransferCompletedEvent> -
-
- -
-
-bridge_transfer_refunded_events: event::EventHandle<atomic_bridge_initiator::BridgeTransferRefundedEvent> -
-
- -
-
- - -
- - - -## Function `initialize` - -Initializes the module and stores the EventHandles in the resource. - - -
public fun initialize(aptos_framework: &signer)
-
- - - -
-Implementation - - -
public fun initialize(aptos_framework: &signer) {
-    move_to(aptos_framework, BridgeInitiatorEvents {
-        bridge_transfer_initiated_events: account::new_event_handle<BridgeTransferInitiatedEvent>(aptos_framework),
-        bridge_transfer_completed_events: account::new_event_handle<BridgeTransferCompletedEvent>(aptos_framework),
-        bridge_transfer_refunded_events: account::new_event_handle<BridgeTransferRefundedEvent>(aptos_framework),
-    });
-}
-
- - - -
- - - -## Function `initiate_bridge_transfer` - -Initiate a bridge transfer of ETH from Movement to the base layer -Anyone can initiate a bridge transfer from the source chain -The amount is burnt from the initiator - - -
public entry fun initiate_bridge_transfer(initiator: &signer, recipient: vector<u8>, hash_lock: vector<u8>, amount: u64)
-
- - - -
-Implementation - - -
public entry fun initiate_bridge_transfer(
-    initiator: &signer,
-    recipient: vector<u8>,
-    hash_lock: vector<u8>,
-    amount: u64
-) acquires BridgeInitiatorEvents {
-    let ethereum_address = ethereum::ethereum_address_no_eip55(recipient);
-    let initiator_address = signer::address_of(initiator);
-    let time_lock = atomic_bridge_configuration::initiator_timelock_duration();
-
-    let details =
-        atomic_bridge_store::create_details(
-            initiator_address,
-            ethereum_address, amount,
-            hash_lock,
-            time_lock
-        );
-
-    let bridge_transfer_id = bridge_transfer_id(&details);
-    atomic_bridge_store::add(bridge_transfer_id, details);
-    atomic_bridge::burn(initiator_address, amount);
-
-    let bridge_initiator_events = borrow_global_mut<BridgeInitiatorEvents>(@aptos_framework);
-    event::emit_event(
-        &mut bridge_initiator_events.bridge_transfer_initiated_events,
-        BridgeTransferInitiatedEvent {
-            bridge_transfer_id,
-            initiator: initiator_address,
-            recipient,
-            amount,
-            hash_lock,
-            time_lock
-        },
-    );
-}
-
- - - -
- - - -## Function `complete_bridge_transfer` - -Bridge operator can complete the transfer - - -
public entry fun complete_bridge_transfer(caller: &signer, bridge_transfer_id: vector<u8>, pre_image: vector<u8>)
-
- - - -
-Implementation - - -
public entry fun complete_bridge_transfer (
-    caller: &signer,
-    bridge_transfer_id: vector<u8>,
-    pre_image: vector<u8>,
-) acquires BridgeInitiatorEvents {
-    assert_is_caller_operator(caller);
-    let (_, _) = atomic_bridge_store::complete_transfer<address, EthereumAddress>(bridge_transfer_id, create_hashlock(pre_image));
-
-    let bridge_initiator_events = borrow_global_mut<BridgeInitiatorEvents>(@aptos_framework);
-    event::emit_event(
-        &mut bridge_initiator_events.bridge_transfer_completed_events,
-        BridgeTransferCompletedEvent {
-            bridge_transfer_id,
-            pre_image,
-        },
-    );
-}
-
- - - -
- - - -## Function `refund_bridge_transfer` - -Anyone can refund the transfer on the source chain once time lock has passed - - -
public entry fun refund_bridge_transfer(_caller: &signer, bridge_transfer_id: vector<u8>)
-
- - - -
-Implementation - - -
public entry fun refund_bridge_transfer (
-    _caller: &signer,
-    bridge_transfer_id: vector<u8>,
-) acquires BridgeInitiatorEvents {
-    let (receiver, amount) = atomic_bridge_store::cancel_transfer<address, EthereumAddress>(bridge_transfer_id);
-    atomic_bridge::mint(receiver, amount);
-
-    let bridge_initiator_events = borrow_global_mut<BridgeInitiatorEvents>(@aptos_framework);
-    event::emit_event(
-        &mut bridge_initiator_events.bridge_transfer_refunded_events,
-        BridgeTransferRefundedEvent {
-            bridge_transfer_id,
-        },
-    );
-}
-
- - - -
- - -[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/aptos-framework/doc/auth_data.md b/aptos-move/framework/aptos-framework/doc/auth_data.md new file mode 100644 index 0000000000000..52a376ae126bc --- /dev/null +++ b/aptos-move/framework/aptos-framework/doc/auth_data.md @@ -0,0 +1,248 @@ + + + +# Module `0x1::auth_data` + + + +- [Enum `AbstractionAuthData`](#0x1_auth_data_AbstractionAuthData) +- [Constants](#@Constants_0) +- [Function `digest`](#0x1_auth_data_digest) +- [Function `authenticator`](#0x1_auth_data_authenticator) +- [Function `is_derivable`](#0x1_auth_data_is_derivable) +- [Function `derivable_abstract_signature`](#0x1_auth_data_derivable_abstract_signature) +- [Function `derivable_abstract_public_key`](#0x1_auth_data_derivable_abstract_public_key) + + +
use 0x1::error;
+
+ + + + + +## Enum `AbstractionAuthData` + + + +
enum AbstractionAuthData has copy, drop
+
+ + + +
+Variants + + +
+V1 + + +
+Fields + + +
+
+digest: vector<u8> +
+
+ +
+
+authenticator: vector<u8> +
+
+ +
+
+ + +
+ +
+ +
+DerivableV1 + + +
+Fields + + +
+
+digest: vector<u8> +
+
+ +
+
+abstract_signature: vector<u8> +
+
+ +
+
+abstract_public_key: vector<u8> +
+
+ +
+
+ + +
+ +
+ +
+ + + +## Constants + + + + + + +
const ENOT_DERIVABLE_AUTH_DATA: u64 = 2;
+
+ + + + + + + +
const ENOT_REGULAR_AUTH_DATA: u64 = 1;
+
+ + + + + +## Function `digest` + + + +
public fun digest(self: &auth_data::AbstractionAuthData): &vector<u8>
+
+ + + +
+Implementation + + +
public fun digest(self: &AbstractionAuthData): &vector<u8> {
+    &self.digest
+}
+
+ + + +
+ + + +## Function `authenticator` + + + +
public fun authenticator(self: &auth_data::AbstractionAuthData): &vector<u8>
+
+ + + +
+Implementation + + +
public fun authenticator(self: &AbstractionAuthData): &vector<u8> {
+    assert!(self is V1, error::invalid_argument(ENOT_REGULAR_AUTH_DATA));
+    &self.authenticator
+}
+
+ + + +
+ + + +## Function `is_derivable` + + + +
public fun is_derivable(self: &auth_data::AbstractionAuthData): bool
+
+ + + +
+Implementation + + +
public fun is_derivable(self: &AbstractionAuthData): bool {
+    self is DerivableV1
+}
+
+ + + +
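+
+A minimal sketch of how a caller might branch on the variant before picking an accessor, using the derivable_* accessors documented below. It assumes the module is importable as aptos_framework::auth_data; the example module, address, and helper name are hypothetical:
+
+```move
+module 0xcafe::auth_data_example {
+    use aptos_framework::auth_data::{Self, AbstractionAuthData};
+
+    // Hypothetical helper: returns the credential bytes appropriate for the variant.
+    public fun credential_bytes(aa: &AbstractionAuthData): vector<u8> {
+        if (auth_data::is_derivable(aa)) {
+            // DerivableV1 carries an abstract signature and an abstract public key.
+            *auth_data::derivable_abstract_signature(aa)
+        } else {
+            // V1 carries an opaque authenticator blob.
+            *auth_data::authenticator(aa)
+        }
+    }
+}
+```
+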
+ + + +## Function `derivable_abstract_signature` + + + +
public fun derivable_abstract_signature(self: &auth_data::AbstractionAuthData): &vector<u8>
+
+ + + +
+Implementation + + +
public fun derivable_abstract_signature(self: &AbstractionAuthData): &vector<u8> {
+    assert!(self is DerivableV1, error::invalid_argument(ENOT_DERIVABLE_AUTH_DATA));
+    &self.abstract_signature
+}
+
+ + + +
+ + + +## Function `derivable_abstract_public_key` + + + +
public fun derivable_abstract_public_key(self: &auth_data::AbstractionAuthData): &vector<u8>
+
+ + + +
+Implementation + + +
public fun derivable_abstract_public_key(self: &AbstractionAuthData): &vector<u8> {
+    assert!(self is DerivableV1, error::invalid_argument(ENOT_DERIVABLE_AUTH_DATA));
+    &self.abstract_public_key
+}
+
+ + + +
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/aptos-framework/doc/base16.md b/aptos-move/framework/aptos-framework/doc/base16.md new file mode 100644 index 0000000000000..399bec38503e7 --- /dev/null +++ b/aptos-move/framework/aptos-framework/doc/base16.md @@ -0,0 +1,113 @@ + + + +# Module `0x1::base16` + + + +- [Function `hex_char_to_u8`](#0x1_base16_hex_char_to_u8) +- [Function `base16_utf8_to_vec_u8`](#0x1_base16_base16_utf8_to_vec_u8) +- [Specification](#@Specification_0) + - [Function `base16_utf8_to_vec_u8`](#@Specification_0_base16_utf8_to_vec_u8) + + +
+ + + + + +## Function `hex_char_to_u8` + + + +
public(friend) fun hex_char_to_u8(c: u8): u8
+
+ + + +
+Implementation + + +
public(friend) fun hex_char_to_u8(c: u8): u8 {
+    if (c >= 48 && c <= 57) {  // '0' to '9'
+        c - 48
+    } else if (c >= 65 && c <= 70) { // 'A' to 'F'
+        c - 55
+    } else if (c >= 97 && c <= 102) { // 'a' to 'f'
+        c - 87
+    } else {
+        abort 1
+    }
+}
+
+ + + +
+ + + +## Function `base16_utf8_to_vec_u8` + + + +
public(friend) fun base16_utf8_to_vec_u8(str: vector<u8>): vector<u8>
+
+ + + +
+Implementation + + +
public(friend) fun base16_utf8_to_vec_u8(str: vector<u8>): vector<u8> {
+    let result = vector::empty<u8>();
+    let i = 0;
+    while (i < vector::length(&str)) {
+        let c1 = vector::borrow(&str, i);
+        let c2 = vector::borrow(&str, i + 1);
+        let byte = hex_char_to_u8(*c1) << 4 | hex_char_to_u8(*c2);
+        vector::push_back(&mut result, byte);
+        i = i + 2;
+    };
+    result
+}
+
+ + + +
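+
+Both functions are public(friend), so they can only be called from modules declared as friends of 0x1::base16. As a hedged sketch of the expected behavior from such a friend module (fragment only, not a complete module):
+
+```move
+// Inside a friend module of 0x1::base16 (illustrative only):
+let bytes = base16::base16_utf8_to_vec_u8(b"deadBEEF");
+// Each pair of hex characters becomes one byte; mixed case is accepted.
+assert!(bytes == vector[0xde, 0xad, 0xbe, 0xef], 0);
+```
+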
+ + + +## Specification + + + + +### Function `base16_utf8_to_vec_u8` + + +
public(friend) fun base16_utf8_to_vec_u8(str: vector<u8>): vector<u8>
+
+ + + + +
pragma opaque;
+ensures [abstract] result == spec_base16_utf8_to_vec_u8(str);
+
+ + + + + + + +
fun spec_base16_utf8_to_vec_u8(str: vector<u8>): vector<u8>;
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/aptos-framework/doc/bcs_stream.md b/aptos-move/framework/aptos-framework/doc/bcs_stream.md new file mode 100644 index 0000000000000..1de9a5914caf2 --- /dev/null +++ b/aptos-move/framework/aptos-framework/doc/bcs_stream.md @@ -0,0 +1,663 @@ + + + +# Module `0x1::bcs_stream` + +This module enables the deserialization of BCS-formatted byte arrays into Move primitive types. +Deserialization Strategies: +- Per-Byte Deserialization: Employed for most types to ensure lower gas consumption, this method processes each byte +individually to match the length and type requirements of target Move types. +- Exception: For the deserialize_address function, the function-based approach from aptos_std::from_bcs is used +due to type constraints, even though it is generally more gas-intensive. +- This can be optimized further by introducing native vector slices. +Application: +- This deserializer is particularly valuable for processing BCS serialized data within Move modules, +especially useful for systems requiring cross-chain message interpretation or off-chain data verification. + + +- [Struct `BCSStream`](#0x1_bcs_stream_BCSStream) +- [Constants](#@Constants_0) +- [Function `new`](#0x1_bcs_stream_new) +- [Function `deserialize_uleb128`](#0x1_bcs_stream_deserialize_uleb128) +- [Function `deserialize_bool`](#0x1_bcs_stream_deserialize_bool) +- [Function `deserialize_address`](#0x1_bcs_stream_deserialize_address) +- [Function `deserialize_u8`](#0x1_bcs_stream_deserialize_u8) +- [Function `deserialize_u16`](#0x1_bcs_stream_deserialize_u16) +- [Function `deserialize_u32`](#0x1_bcs_stream_deserialize_u32) +- [Function `deserialize_u64`](#0x1_bcs_stream_deserialize_u64) +- [Function `deserialize_u128`](#0x1_bcs_stream_deserialize_u128) +- [Function `deserialize_u256`](#0x1_bcs_stream_deserialize_u256) +- [Function `deserialize_u256_entry`](#0x1_bcs_stream_deserialize_u256_entry) +- [Function `deserialize_vector`](#0x1_bcs_stream_deserialize_vector) +- [Function `deserialize_string`](#0x1_bcs_stream_deserialize_string) +- [Function `deserialize_option`](#0x1_bcs_stream_deserialize_option) +- [Specification](#@Specification_1) + + +
use 0x1::error;
+use 0x1::from_bcs;
+use 0x1::string;
+use 0x1::vector;
+
+ + + + + +## Struct `BCSStream` + + + +
struct BCSStream has drop
+
+ + + +
+Fields + + +
+
+data: vector<u8> +
+
+ Byte buffer containing the serialized data. +
+
+cur: u64 +
+
+ Cursor indicating the current position in the byte buffer. +
+
+ + +
+ + + +## Constants + + + + +The data does not fit the expected format. + + +
const EMALFORMED_DATA: u64 = 1;
+
+ + + + + +There are not enough bytes to deserialize for the given type. + + +
const EOUT_OF_BYTES: u64 = 2;
+
+ + + + + +## Function `new` + +Constructs a new BCSStream instance from the provided byte array. + + +
public fun new(data: vector<u8>): bcs_stream::BCSStream
+
+ + + +
+Implementation + + +
public fun new(data: vector<u8>): BCSStream {
+    BCSStream {
+        data,
+        cur: 0,
+    }
+}
+
+ + + +
+ + + +## Function `deserialize_uleb128` + +Deserializes a ULEB128-encoded integer from the stream. +In the BCS format, lengths of vectors are represented using ULEB128 encoding. + + +
public fun deserialize_uleb128(stream: &mut bcs_stream::BCSStream): u64
+
+ + + +
+Implementation + + +
public fun deserialize_uleb128(stream: &mut BCSStream): u64 {
+    let res = 0;
+    let shift = 0;
+
+    while (stream.cur < vector::length(&stream.data)) {
+        let byte = *vector::borrow(&stream.data, stream.cur);
+        stream.cur = stream.cur + 1;
+
+        let val = ((byte & 0x7f) as u64);
+        if (((val << shift) >> shift) != val) {
+            abort error::invalid_argument(EMALFORMED_DATA)
+        };
+        res = res | (val << shift);
+
+        if ((byte & 0x80) == 0) {
+            if (shift > 0 && val == 0) {
+                abort error::invalid_argument(EMALFORMED_DATA)
+            };
+            return res
+        };
+
+        shift = shift + 7;
+        if (shift > 64) {
+            abort error::invalid_argument(EMALFORMED_DATA)
+        };
+    };
+
+    abort error::out_of_range(EOUT_OF_BYTES)
+}
+
+ + + +
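+
+For example, 300 is ULEB128-encoded as the two bytes 0xAC 0x02 (seven low bits plus a continuation bit, then the remaining bits). A small sketch, assuming the module is importable as aptos_framework::bcs_stream; the test module and address are hypothetical:
+
+```move
+#[test_only]
+module 0xcafe::uleb128_example {
+    use aptos_framework::bcs_stream;
+
+    #[test]
+    fun decode_300() {
+        // 300 = 0b1_0010_1100, encoded as [0xAC, 0x02] in ULEB128.
+        let stream = bcs_stream::new(vector[0xAC, 0x02]);
+        assert!(bcs_stream::deserialize_uleb128(&mut stream) == 300, 0);
+    }
+}
+```
+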
+ + + +## Function `deserialize_bool` + +Deserializes a bool value from the stream. + + +
public fun deserialize_bool(stream: &mut bcs_stream::BCSStream): bool
+
+ + + +
+Implementation + + +
public fun deserialize_bool(stream: &mut BCSStream): bool {
+    assert!(stream.cur < vector::length(&stream.data), error::out_of_range(EOUT_OF_BYTES));
+    let byte = *vector::borrow(&stream.data, stream.cur);
+    stream.cur = stream.cur + 1;
+    if (byte == 0) {
+        false
+    } else if (byte == 1) {
+        true
+    } else {
+        abort error::invalid_argument(EMALFORMED_DATA)
+    }
+}
+
+ + + +
+ + + +## Function `deserialize_address` + +Deserializes an address value from the stream. +32-byte address values are serialized using little-endian byte order. +This function utilizes the to_address function from the aptos_std::from_bcs module, +because the Move type system does not permit per-byte referencing of addresses. + + +
public fun deserialize_address(stream: &mut bcs_stream::BCSStream): address
+
+ + + +
+Implementation + + +
public fun deserialize_address(stream: &mut BCSStream): address {
+    let data = &stream.data;
+    let cur = stream.cur;
+
+    assert!(cur + 32 <= vector::length(data), error::out_of_range(EOUT_OF_BYTES));
+    let res = from_bcs::to_address(vector::slice(data, cur, cur + 32));
+
+    stream.cur = cur + 32;
+    res
+}
+
+ + + +
+ + + +## Function `deserialize_u8` + +Deserializes a u8 value from the stream. +1-byte u8 values are serialized using little-endian byte order. + + +
public fun deserialize_u8(stream: &mut bcs_stream::BCSStream): u8
+
+ + + +
+Implementation + + +
public fun deserialize_u8(stream: &mut BCSStream): u8 {
+    let data = &stream.data;
+    let cur = stream.cur;
+
+    assert!(cur < vector::length(data), error::out_of_range(EOUT_OF_BYTES));
+
+    let res = *vector::borrow(data, cur);
+
+    stream.cur = cur + 1;
+    res
+}
+
+ + + +
+ + + +## Function `deserialize_u16` + +Deserializes a u16 value from the stream. +2-byte u16 values are serialized using little-endian byte order. + + +
public fun deserialize_u16(stream: &mut bcs_stream::BCSStream): u16
+
+ + + +
+Implementation + + +
public fun deserialize_u16(stream: &mut BCSStream): u16 {
+    let data = &stream.data;
+    let cur = stream.cur;
+
+    assert!(cur + 2 <= vector::length(data), error::out_of_range(EOUT_OF_BYTES));
+    let res =
+        (*vector::borrow(data, cur) as u16) |
+            ((*vector::borrow(data, cur + 1) as u16) << 8)
+    ;
+
+    stream.cur = stream.cur + 2;
+    res
+}
+
+ + + +
+ + + +## Function `deserialize_u32` + +Deserializes a u32 value from the stream. +4-byte u32 values are serialized using little-endian byte order. + + +
public fun deserialize_u32(stream: &mut bcs_stream::BCSStream): u32
+
+ + + +
+Implementation + + +
public fun deserialize_u32(stream: &mut BCSStream): u32 {
+    let data = &stream.data;
+    let cur = stream.cur;
+
+    assert!(cur + 4 <= vector::length(data), error::out_of_range(EOUT_OF_BYTES));
+    let res =
+        (*vector::borrow(data, cur) as u32) |
+            ((*vector::borrow(data, cur + 1) as u32) << 8) |
+            ((*vector::borrow(data, cur + 2) as u32) << 16) |
+            ((*vector::borrow(data, cur + 3) as u32) << 24)
+    ;
+
+    stream.cur = stream.cur + 4;
+    res
+}
+
+ + + +
+ + + +## Function `deserialize_u64` + +Deserializes a u64 value from the stream. +8-byte u64 values are serialized using little-endian byte order. + + +
public fun deserialize_u64(stream: &mut bcs_stream::BCSStream): u64
+
+ + + +
+Implementation + + +
public fun deserialize_u64(stream: &mut BCSStream): u64 {
+    let data = &stream.data;
+    let cur = stream.cur;
+
+    assert!(cur + 8 <= vector::length(data), error::out_of_range(EOUT_OF_BYTES));
+    let res =
+        (*vector::borrow(data, cur) as u64) |
+            ((*vector::borrow(data, cur + 1) as u64) << 8) |
+            ((*vector::borrow(data, cur + 2) as u64) << 16) |
+            ((*vector::borrow(data, cur + 3) as u64) << 24) |
+            ((*vector::borrow(data, cur + 4) as u64) << 32) |
+            ((*vector::borrow(data, cur + 5) as u64) << 40) |
+            ((*vector::borrow(data, cur + 6) as u64) << 48) |
+            ((*vector::borrow(data, cur + 7) as u64) << 56)
+    ;
+
+    stream.cur = stream.cur + 8;
+    res
+}
+
+ + + +
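+
+A round-trip sketch: BCS serializes struct fields in declaration order, so a struct of two u64 fields can be read back field by field. The test module and address below are hypothetical, and the import path assumes the module is available as aptos_framework::bcs_stream:
+
+```move
+#[test_only]
+module 0xcafe::bcs_stream_example {
+    use std::bcs;
+    use aptos_framework::bcs_stream;
+
+    struct Point has drop { x: u64, y: u64 }
+
+    #[test]
+    fun roundtrip() {
+        // Fields are serialized in declaration order, each as 8 little-endian bytes.
+        let p = Point { x: 7, y: 11 };
+        let stream = bcs_stream::new(bcs::to_bytes(&p));
+        assert!(bcs_stream::deserialize_u64(&mut stream) == 7, 0);
+        assert!(bcs_stream::deserialize_u64(&mut stream) == 11, 0);
+    }
+}
+```
+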
+ + + +## Function `deserialize_u128` + +Deserializes a u128 value from the stream. +16-byte u128 values are serialized using little-endian byte order. + + +
public fun deserialize_u128(stream: &mut bcs_stream::BCSStream): u128
+
+ + + +
+Implementation + + +
public fun deserialize_u128(stream: &mut BCSStream): u128 {
+    let data = &stream.data;
+    let cur = stream.cur;
+
+    assert!(cur + 16 <= vector::length(data), error::out_of_range(EOUT_OF_BYTES));
+    let res =
+        (*vector::borrow(data, cur) as u128) |
+            ((*vector::borrow(data, cur + 1) as u128) << 8) |
+            ((*vector::borrow(data, cur + 2) as u128) << 16) |
+            ((*vector::borrow(data, cur + 3) as u128) << 24) |
+            ((*vector::borrow(data, cur + 4) as u128) << 32) |
+            ((*vector::borrow(data, cur + 5) as u128) << 40) |
+            ((*vector::borrow(data, cur + 6) as u128) << 48) |
+            ((*vector::borrow(data, cur + 7) as u128) << 56) |
+            ((*vector::borrow(data, cur + 8) as u128) << 64) |
+            ((*vector::borrow(data, cur + 9) as u128) << 72) |
+            ((*vector::borrow(data, cur + 10) as u128) << 80) |
+            ((*vector::borrow(data, cur + 11) as u128) << 88) |
+            ((*vector::borrow(data, cur + 12) as u128) << 96) |
+            ((*vector::borrow(data, cur + 13) as u128) << 104) |
+            ((*vector::borrow(data, cur + 14) as u128) << 112) |
+            ((*vector::borrow(data, cur + 15) as u128) << 120)
+    ;
+
+    stream.cur = stream.cur + 16;
+    res
+}
+
+ + + +
+ + + +## Function `deserialize_u256` + +Deserializes a u256 value from the stream. +32-byte u256 values are serialized using little-endian byte order. + + +
public fun deserialize_u256(stream: &mut bcs_stream::BCSStream): u256
+
+ + + +
+Implementation + + +
public fun deserialize_u256(stream: &mut BCSStream): u256 {
+    let data = &stream.data;
+    let cur = stream.cur;
+
+    assert!(cur + 32 <= vector::length(data), error::out_of_range(EOUT_OF_BYTES));
+    let res =
+        (*vector::borrow(data, cur) as u256) |
+            ((*vector::borrow(data, cur + 1) as u256) << 8) |
+            ((*vector::borrow(data, cur + 2) as u256) << 16) |
+            ((*vector::borrow(data, cur + 3) as u256) << 24) |
+            ((*vector::borrow(data, cur + 4) as u256) << 32) |
+            ((*vector::borrow(data, cur + 5) as u256) << 40) |
+            ((*vector::borrow(data, cur + 6) as u256) << 48) |
+            ((*vector::borrow(data, cur + 7) as u256) << 56) |
+            ((*vector::borrow(data, cur + 8) as u256) << 64) |
+            ((*vector::borrow(data, cur + 9) as u256) << 72) |
+            ((*vector::borrow(data, cur + 10) as u256) << 80) |
+            ((*vector::borrow(data, cur + 11) as u256) << 88) |
+            ((*vector::borrow(data, cur + 12) as u256) << 96) |
+            ((*vector::borrow(data, cur + 13) as u256) << 104) |
+            ((*vector::borrow(data, cur + 14) as u256) << 112) |
+            ((*vector::borrow(data, cur + 15) as u256) << 120) |
+            ((*vector::borrow(data, cur + 16) as u256) << 128) |
+            ((*vector::borrow(data, cur + 17) as u256) << 136) |
+            ((*vector::borrow(data, cur + 18) as u256) << 144) |
+            ((*vector::borrow(data, cur + 19) as u256) << 152) |
+            ((*vector::borrow(data, cur + 20) as u256) << 160) |
+            ((*vector::borrow(data, cur + 21) as u256) << 168) |
+            ((*vector::borrow(data, cur + 22) as u256) << 176) |
+            ((*vector::borrow(data, cur + 23) as u256) << 184) |
+            ((*vector::borrow(data, cur + 24) as u256) << 192) |
+            ((*vector::borrow(data, cur + 25) as u256) << 200) |
+            ((*vector::borrow(data, cur + 26) as u256) << 208) |
+            ((*vector::borrow(data, cur + 27) as u256) << 216) |
+            ((*vector::borrow(data, cur + 28) as u256) << 224) |
+            ((*vector::borrow(data, cur + 29) as u256) << 232) |
+            ((*vector::borrow(data, cur + 30) as u256) << 240) |
+            ((*vector::borrow(data, cur + 31) as u256) << 248)
+    ;
+
+    stream.cur = stream.cur + 32;
+    res
+}
+
+ + + +
+ + + +## Function `deserialize_u256_entry` + +Deserializes a u256 value from the stream. + + +
public entry fun deserialize_u256_entry(data: vector<u8>, cursor: u64)
+
+ + + +
+Implementation + + +
public entry fun deserialize_u256_entry(data: vector<u8>, cursor: u64) {
+    let stream = BCSStream {
+        data: data,
+        cur: cursor,
+    };
+    deserialize_u256(&mut stream);
+}
+
+ + + +
+ + + +## Function `deserialize_vector` + +Deserializes an array of BCS deserializable elements from the stream. +First, reads the length of the vector, which is in uleb128 format. +After determining the length, it then reads the contents of the vector. +The elem_deserializer lambda expression is used sequentially to deserialize each element of the vector. + + +
public fun deserialize_vector<E>(stream: &mut bcs_stream::BCSStream, elem_deserializer: |&mut bcs_stream::BCSStream|E): vector<E>
+
+ + + +
+Implementation + + +
public inline fun deserialize_vector<E>(stream: &mut BCSStream, elem_deserializer: |&mut BCSStream| E): vector<E> {
+    let len = deserialize_uleb128(stream);
+    let v = vector::empty();
+
+    let i = 0;
+    while (i < len) {
+        vector::push_back(&mut v, elem_deserializer(stream));
+        i = i + 1;
+    };
+
+    v
+}
+
+ + + +
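+
+For instance, a vector<u64> produced by std::bcs can be read back by passing deserialize_u64 as the element deserializer (hypothetical test module and address; import path assumed as above):
+
+```move
+#[test_only]
+module 0xcafe::vector_stream_example {
+    use std::bcs;
+    use aptos_framework::bcs_stream;
+
+    #[test]
+    fun read_vector_u64() {
+        let vals = vector[1u64, 2, 3];
+        let stream = bcs_stream::new(bcs::to_bytes(&vals));
+        // The leading ULEB128 length (3) is consumed by deserialize_vector itself.
+        let read = bcs_stream::deserialize_vector(&mut stream, |s| bcs_stream::deserialize_u64(s));
+        assert!(read == vals, 0);
+    }
+}
+```
+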
+ + + +## Function `deserialize_string` + +Deserializes utf-8 String from the stream. +First, reads the length of the String, which is in uleb128 format. +After determining the length, it then reads the contents of the String. + + +
public fun deserialize_string(stream: &mut bcs_stream::BCSStream): string::String
+
+ + + +
+Implementation + + +
public fun deserialize_string(stream: &mut BCSStream): String {
+    let len = deserialize_uleb128(stream);
+    let data = &stream.data;
+    let cur = stream.cur;
+
+    assert!(cur + len <= vector::length(data), error::out_of_range(EOUT_OF_BYTES));
+
+    let res = string::utf8(vector::slice(data, cur, cur + len));
+    stream.cur = cur + len;
+
+    res
+}
+
+ + + +
+ + + +## Function `deserialize_option` + +Deserializes Option from the stream. +First, reads a single byte representing the presence (0x01) or absence (0x00) of data. +After determining the presence of data, it then reads the actual data if present. +The elem_deserializer lambda expression is used to deserialize the element contained within the Option. + + +
public fun deserialize_option<E>(stream: &mut bcs_stream::BCSStream, elem_deserializer: |&mut bcs_stream::BCSStream|E): option::Option<E>
+
+ + + +
+Implementation + + +
public inline fun deserialize_option<E>(stream: &mut BCSStream, elem_deserializer: |&mut BCSStream| E): Option<E> {
+    let is_data = deserialize_bool(stream);
+    if (is_data) {
+        option::some(elem_deserializer(stream))
+    } else {
+        option::none()
+    }
+}
+
+ + + +
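+
+A matching sketch for options: Option<u8> is encoded as a presence byte followed by the payload when present (hypothetical test module and address; import path assumed as above):
+
+```move
+#[test_only]
+module 0xcafe::option_stream_example {
+    use std::bcs;
+    use std::option;
+    use aptos_framework::bcs_stream;
+
+    #[test]
+    fun read_option_u8() {
+        let opt = option::some(42u8);
+        let stream = bcs_stream::new(bcs::to_bytes(&opt));
+        let read = bcs_stream::deserialize_option(&mut stream, |s| bcs_stream::deserialize_u8(s));
+        assert!(read == opt, 0);
+    }
+}
+```
+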
+ + + +## Specification + + + +
pragma verify = false;
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/aptos-framework/doc/big_ordered_map.md b/aptos-move/framework/aptos-framework/doc/big_ordered_map.md new file mode 100644 index 0000000000000..b5d0b75b5a487 --- /dev/null +++ b/aptos-move/framework/aptos-framework/doc/big_ordered_map.md @@ -0,0 +1,3086 @@ + + + +# Module `0x1::big_ordered_map` + +This module provides an implementation for a big ordered map. +Big means that it is stored across multiple resources, and doesn't have an +upper limit on the number of elements it can contain. + +Keys point to values, and each key in the map must be unique. + +Currently, one implementation is provided - BPlusTreeMap, backed by a B+Tree, +with each node being a separate resource, internally containing OrderedMap. + +BPlusTreeMap is chosen since the biggest (performance and gas) +costs are for reading resources, and it: +* reduces the number of resource accesses +* reduces the number of rebalancing operations, and makes each rebalancing +operation touch only a few resources +* allows for parallelism for keys that are not close to each other, +once it contains enough keys + +Note: Default configuration (used in new_with_config(0, 0, false)) allows for keys and values of up to 5KB, +or 100 times the first (key, value), to satisfy general needs. +If you need larger, use other constructor methods. +Based on the initial configuration, BigOrderedMap will always accept insertion of keys and values +up to the allowed size, and will abort with EKEY_BYTES_TOO_LARGE or EARGUMENT_BYTES_TOO_LARGE otherwise. + +TODO: all iterator functions are public(friend) for now, so that they can be modified in a +backward incompatible way. The type is also named IteratorPtr, so that the name Iterator is free to use later. +They are waiting for a Move improvement that will allow references to be part of a struct, +allowing cleaner iterator APIs.
+ + +- [Enum `Node`](#0x1_big_ordered_map_Node) +- [Enum `Child`](#0x1_big_ordered_map_Child) +- [Enum `IteratorPtr`](#0x1_big_ordered_map_IteratorPtr) +- [Enum `BigOrderedMap`](#0x1_big_ordered_map_BigOrderedMap) +- [Constants](#@Constants_0) +- [Function `new`](#0x1_big_ordered_map_new) +- [Function `new_with_reusable`](#0x1_big_ordered_map_new_with_reusable) +- [Function `new_with_type_size_hints`](#0x1_big_ordered_map_new_with_type_size_hints) +- [Function `new_with_config`](#0x1_big_ordered_map_new_with_config) +- [Function `new_from`](#0x1_big_ordered_map_new_from) +- [Function `destroy_empty`](#0x1_big_ordered_map_destroy_empty) +- [Function `allocate_spare_slots`](#0x1_big_ordered_map_allocate_spare_slots) +- [Function `is_empty`](#0x1_big_ordered_map_is_empty) +- [Function `compute_length`](#0x1_big_ordered_map_compute_length) +- [Function `add`](#0x1_big_ordered_map_add) +- [Function `upsert`](#0x1_big_ordered_map_upsert) +- [Function `remove`](#0x1_big_ordered_map_remove) +- [Function `add_all`](#0x1_big_ordered_map_add_all) +- [Function `pop_front`](#0x1_big_ordered_map_pop_front) +- [Function `pop_back`](#0x1_big_ordered_map_pop_back) +- [Function `lower_bound`](#0x1_big_ordered_map_lower_bound) +- [Function `find`](#0x1_big_ordered_map_find) +- [Function `contains`](#0x1_big_ordered_map_contains) +- [Function `borrow`](#0x1_big_ordered_map_borrow) +- [Function `get`](#0x1_big_ordered_map_get) +- [Function `borrow_mut`](#0x1_big_ordered_map_borrow_mut) +- [Function `borrow_front`](#0x1_big_ordered_map_borrow_front) +- [Function `borrow_back`](#0x1_big_ordered_map_borrow_back) +- [Function `prev_key`](#0x1_big_ordered_map_prev_key) +- [Function `next_key`](#0x1_big_ordered_map_next_key) +- [Function `to_ordered_map`](#0x1_big_ordered_map_to_ordered_map) +- [Function `keys`](#0x1_big_ordered_map_keys) +- [Function `for_each_and_clear`](#0x1_big_ordered_map_for_each_and_clear) +- [Function `for_each`](#0x1_big_ordered_map_for_each) +- [Function `for_each_ref`](#0x1_big_ordered_map_for_each_ref) +- [Function `for_each_ref_friend`](#0x1_big_ordered_map_for_each_ref_friend) +- [Function `for_each_mut`](#0x1_big_ordered_map_for_each_mut) +- [Function `destroy`](#0x1_big_ordered_map_destroy) +- [Function `new_begin_iter`](#0x1_big_ordered_map_new_begin_iter) +- [Function `new_end_iter`](#0x1_big_ordered_map_new_end_iter) +- [Function `iter_is_begin`](#0x1_big_ordered_map_iter_is_begin) +- [Function `iter_is_end`](#0x1_big_ordered_map_iter_is_end) +- [Function `iter_borrow_key`](#0x1_big_ordered_map_iter_borrow_key) +- [Function `iter_borrow`](#0x1_big_ordered_map_iter_borrow) +- [Function `iter_borrow_mut`](#0x1_big_ordered_map_iter_borrow_mut) +- [Function `iter_next`](#0x1_big_ordered_map_iter_next) +- [Function `iter_prev`](#0x1_big_ordered_map_iter_prev) +- [Function `for_each_leaf_node_ref`](#0x1_big_ordered_map_for_each_leaf_node_ref) +- [Function `borrow_node`](#0x1_big_ordered_map_borrow_node) +- [Function `borrow_node_mut`](#0x1_big_ordered_map_borrow_node_mut) +- [Function `add_or_upsert_impl`](#0x1_big_ordered_map_add_or_upsert_impl) +- [Function `validate_dynamic_size_and_init_max_degrees`](#0x1_big_ordered_map_validate_dynamic_size_and_init_max_degrees) +- [Function `validate_static_size_and_init_max_degrees`](#0x1_big_ordered_map_validate_static_size_and_init_max_degrees) +- [Function `validate_size_and_init_max_degrees`](#0x1_big_ordered_map_validate_size_and_init_max_degrees) +- [Function `destroy_inner_child`](#0x1_big_ordered_map_destroy_inner_child) +- [Function 
`destroy_empty_node`](#0x1_big_ordered_map_destroy_empty_node) +- [Function `new_node`](#0x1_big_ordered_map_new_node) +- [Function `new_node_with_children`](#0x1_big_ordered_map_new_node_with_children) +- [Function `new_inner_child`](#0x1_big_ordered_map_new_inner_child) +- [Function `new_leaf_child`](#0x1_big_ordered_map_new_leaf_child) +- [Function `new_iter`](#0x1_big_ordered_map_new_iter) +- [Function `find_leaf`](#0x1_big_ordered_map_find_leaf) +- [Function `find_leaf_path`](#0x1_big_ordered_map_find_leaf_path) +- [Function `get_max_degree`](#0x1_big_ordered_map_get_max_degree) +- [Function `replace_root`](#0x1_big_ordered_map_replace_root) +- [Function `add_at`](#0x1_big_ordered_map_add_at) +- [Function `update_key`](#0x1_big_ordered_map_update_key) +- [Function `remove_at`](#0x1_big_ordered_map_remove_at) +- [Specification](#@Specification_1) + - [Function `add_at`](#@Specification_1_add_at) + - [Function `remove_at`](#@Specification_1_remove_at) + + +
use 0x1::bcs;
+use 0x1::cmp;
+use 0x1::error;
+use 0x1::math64;
+use 0x1::option;
+use 0x1::ordered_map;
+use 0x1::storage_slots_allocator;
+use 0x1::vector;
+
+ + + + + +## Enum `Node` + +A node of the BigOrderedMap. + +An inner node will have all of its children be Child::Inner, pointing to the child nodes. +A leaf node will have all of its children be Child::Leaf. +Basically - a leaf node is a single-resource OrderedMap, containing as many key/value entries as can fit. +So a leaf node contains multiple values, not just one. + +
enum Node<K: store, V: store> has store
+
+ + + +
+Variants + + +
+V1 + + +
+Fields + + +
+
+is_leaf: bool +
+
+ +
+
+children: ordered_map::OrderedMap<K, big_ordered_map::Child<V>> +
+
+ +
+
+prev: u64 +
+
+ +
+
+next: u64 +
+
+ +
+
+ + +
+ +
+ +
+ + + +## Enum `Child` + +Contents of a child node. + + +
enum Child<V: store> has store
+
+ + + +
+Variants + + +
+Inner + + +
+Fields + + +
+
+node_index: storage_slots_allocator::StoredSlot +
+
+ +
+
+ + +
+ +
+ +
+Leaf + + +
+Fields + + +
+
+value: V +
+
+ +
+
+ + +
+ +
+ +
+ + + +## Enum `IteratorPtr` + +An iterator to iterate all keys in the BigOrderedMap. + +TODO: Once fields can be (mutable) references, this class will be deprecated. + + +
enum IteratorPtr<K> has copy, drop
+
+ + + +
+Variants + + +
+End + + +
+Fields + + +
+
+ + +
+ +
+ +
+Some + + +
+Fields + + +
+
+node_index: u64 +
+
+ The index of the node the iterator is pointing to.
+
+child_iter: ordered_map::IteratorPtr +
+
+ The child iterator it is pointing to.
+
+key: K +
+
+ The key that (node_index, child_iter) point to, + cached so that global resources don't need to be borrowed to fetch it again.
+
+ + +
+ +
+ +
+ + + +## Enum `BigOrderedMap` + +The BigOrderedMap data structure. + + +
enum BigOrderedMap<K: store, V: store> has store
+
+ + + +
+Variants + + +
+BPlusTreeMap + + +
+Fields + + +
+
+root: big_ordered_map::Node<K, V> +
+
+ Root node. It is stored directly in the resource itself, unlike all other nodes. +
+
+nodes: storage_slots_allocator::StorageSlotsAllocator<big_ordered_map::Node<K, V>> +
+
+ Storage of all non-root nodes. They are stored in separate storage slots. +
+
+min_leaf_index: u64 +
+
+ The node index of the leftmost node. +
+
+max_leaf_index: u64 +
+
+ The node index of the rightmost node. +
+
+constant_kv_size: bool +
+
+ Whether Key and Value have constant serialized size, and if so, + optimize out size checks on every insert. +
+
+inner_max_degree: u16 +
+
+ The max number of children an inner node can have. +
+
+leaf_max_degree: u16 +
+
+ The max number of children a leaf node can have. +
+
+ + +
+ +
+ +
+ + + +## Constants + + + + +Internal errors. + + +
const EINTERNAL_INVARIANT_BROKEN: u64 = 20;
+
+ + + + + + + +
const NULL_INDEX: u64 = 0;
+
+ + + + + +Trying to do an operation on an IteratorPtr that would go out of bounds + + +
const EITER_OUT_OF_BOUNDS: u64 = 3;
+
+ + + + + +Map key already exists + + +
const EKEY_ALREADY_EXISTS: u64 = 1;
+
+ + + + + +Map key is not found + + +
const EKEY_NOT_FOUND: u64 = 2;
+
+ + + + + +When using the default constructors (new() / new_with_reusable() / new_with_config(0, 0, _)), +keys or values up to this size (5KB) are guaranteed to be accepted, which should satisfy most cases. +If you need larger keys/values, use the other constructors. + +
const DEFAULT_MAX_KEY_OR_VALUE_SIZE: u64 = 5120;
+
+ + + + + +Target node size, from efficiency perspective. + + +
const DEFAULT_TARGET_NODE_SIZE: u64 = 4096;
+
+ + + + + +Trying to insert a (key, value) pair that is too large into the map. + +
const EARGUMENT_BYTES_TOO_LARGE: u64 = 13;
+
+ + + + + +borrow_mut requires that key and value types have constant size +(otherwise it wouldn't be able to guarantee size requirements are not violated) +Use remove() + add() combo instead. + + +
const EBORROW_MUT_REQUIRES_CONSTANT_VALUE_SIZE: u64 = 14;
+
+ + + + + +Cannot use new/new_with_reusable with variable-sized types. +Use new_with_type_size_hints() or new_with_config() instead if your types have variable sizes. +new_with_config(0, 0, false) tries to work reasonably well for a variety of sizes +(allows keys or values of at least 5KB and 100x larger than the first inserted). + +
const ECANNOT_USE_NEW_WITH_VARIABLE_SIZED_TYPES: u64 = 16;
+
+ + + + + +The provided configuration parameter is invalid. + + +
const EINVALID_CONFIG_PARAMETER: u64 = 11;
+
+ + + + + +Trying to insert a key that is too large into the map. + +
const EKEY_BYTES_TOO_LARGE: u64 = 15;
+
+ + + + + +Map isn't empty + + +
const EMAP_NOT_EMPTY: u64 = 12;
+
+ + + + + +Target max node size, when using hints (via new_with_type_size_hints). +Smaller than MAX_NODE_BYTES, to improve performance, as large nodes are inefficient. + +
const HINT_MAX_NODE_BYTES: u64 = 131072;
+
+ + + + + +Smallest allowed degree on inner nodes. + + +
const INNER_MIN_DEGREE: u16 = 4;
+
+ + + + + +Smallest allowed degree on leaf nodes. + +We rely on 1 being a valid size only for the root node, +so this cannot be below 3 (unless that is changed). + +
const LEAF_MIN_DEGREE: u16 = 3;
+
+ + + + + +Largest degree allowed (both for inner and leaf nodes) + + +
const MAX_DEGREE: u64 = 4096;
+
+ + + + + +Largest total size that all keys of an inner node, or all key-value pairs of a leaf node, can have. +The node itself can be a bit larger, due to a few other accounting fields. +This is a bit conservative: a bit less than half of the resource limit (which is 1MB). + +
const MAX_NODE_BYTES: u64 = 409600;
+
+ + + + + + + +
const ROOT_INDEX: u64 = 1;
+
+ + + + + +## Function `new` + +Returns a new BigOrderedMap with the default configuration. + +Cannot be used with variable-sized types. +Use new_with_type_size_hints() or new_with_config() instead if your types have variable sizes. +new_with_config(0, 0, false) tries to work reasonably well for a variety of sizes +(allows keys or values of at least 5KB and 100x larger than the first inserted). + +
public fun new<K: store, V: store>(): big_ordered_map::BigOrderedMap<K, V>
+
+ + + +
+Implementation + + +
public fun new<K: store, V: store>(): BigOrderedMap<K, V> {
+    assert!(
+        bcs::constant_serialized_size<K>().is_some() && bcs::constant_serialized_size<V>().is_some(),
+        error::invalid_argument(ECANNOT_USE_NEW_WITH_VARIABLE_SIZED_TYPES)
+    );
+    new_with_config(0, 0, false)
+}
+
+ + + +
+ + + +## Function `new_with_reusable` + +Returns a new BigOrderedMap with reusable storage slots. + +Cannot be used with variable-sized types. +Use new_with_type_size_hints() or new_with_config() instead if your types have variable sizes. +new_with_config(0, 0, false) tries to work reasonably well for a variety of sizes +(allows keys or values of at least 5KB and 100x larger than the first inserted). + +
public fun new_with_reusable<K: store, V: store>(): big_ordered_map::BigOrderedMap<K, V>
+
+ + + +
+Implementation + + +
public fun new_with_reusable<K: store, V: store>(): BigOrderedMap<K, V> {
+    assert!(
+        bcs::constant_serialized_size<K>().is_some() && bcs::constant_serialized_size<V>().is_some(),
+        error::invalid_argument(ECANNOT_USE_NEW_WITH_VARIABLE_SIZED_TYPES)
+    );
+    new_with_config(0, 0, true)
+}
+
+ + + +
+ + + +## Function `new_with_type_size_hints` + +Returns a new BigOrderedMap, configured based on passed key and value serialized size hints. + + +
public fun new_with_type_size_hints<K: store, V: store>(avg_key_bytes: u64, max_key_bytes: u64, avg_value_bytes: u64, max_value_bytes: u64): big_ordered_map::BigOrderedMap<K, V>
+
+ + + +
+Implementation + + +
public fun new_with_type_size_hints<K: store, V: store>(avg_key_bytes: u64, max_key_bytes: u64, avg_value_bytes: u64, max_value_bytes: u64): BigOrderedMap<K, V> {
+    assert!(avg_key_bytes <= max_key_bytes, error::invalid_argument(EINVALID_CONFIG_PARAMETER));
+    assert!(avg_value_bytes <= max_value_bytes, error::invalid_argument(EINVALID_CONFIG_PARAMETER));
+
+    let inner_max_degree_from_avg = max(min(MAX_DEGREE, DEFAULT_TARGET_NODE_SIZE / avg_key_bytes), INNER_MIN_DEGREE as u64);
+    let inner_max_degree_from_max = HINT_MAX_NODE_BYTES / max_key_bytes;
+    assert!(inner_max_degree_from_max >= (INNER_MIN_DEGREE as u64), error::invalid_argument(EINVALID_CONFIG_PARAMETER));
+
+    let avg_entry_size = avg_key_bytes + avg_value_bytes;
+    let max_entry_size = max_key_bytes + max_value_bytes;
+
+    let leaf_max_degree_from_avg = max(min(MAX_DEGREE, DEFAULT_TARGET_NODE_SIZE / avg_entry_size), LEAF_MIN_DEGREE as u64);
+    let leaf_max_degree_from_max = HINT_MAX_NODE_BYTES / max_entry_size;
+    assert!(leaf_max_degree_from_max >= (INNER_MIN_DEGREE as u64), error::invalid_argument(EINVALID_CONFIG_PARAMETER));
+
+    new_with_config(
+        min(inner_max_degree_from_avg, inner_max_degree_from_max) as u16,
+        min(leaf_max_degree_from_avg, leaf_max_degree_from_max) as u16,
+        false,
+    )
+}
+
+ + + +
+ + + +## Function `new_with_config` + +Returns a new BigOrderedMap with the provided max degree constants (the maximum # of children a node can have, both inner and leaf). + +If 0 is passed, the degree is dynamically computed based on the size of the first key and value. +With 0 it is configured to accept keys and values up to 5KB in size, +or as large as 100x the size of the first insert. (100 = MAX_NODE_BYTES / DEFAULT_TARGET_NODE_SIZE) + +Sizes of all elements must respect (or their additions will be rejected): +key_size * inner_max_degree <= MAX_NODE_BYTES +entry_size * leaf_max_degree <= MAX_NODE_BYTES +If keys or values have variable size, and the first element could be non-representative in size (i.e. smaller than future ones), +it is important to compute and pass inner_max_degree and leaf_max_degree based on the largest element you want to be able to insert. + +reuse_slots means that removing elements from the map doesn't free the storage slots or return the storage refund. +Together with allocate_spare_slots, it makes it possible to preallocate slots and give inserts predictable gas costs. +(otherwise, inserts that require the map to add new nodes cost significantly more than the rest) + +
public fun new_with_config<K: store, V: store>(inner_max_degree: u16, leaf_max_degree: u16, reuse_slots: bool): big_ordered_map::BigOrderedMap<K, V>
+
+ + + +
+Implementation + + +
public fun new_with_config<K: store, V: store>(inner_max_degree: u16, leaf_max_degree: u16, reuse_slots: bool): BigOrderedMap<K, V> {
+    assert!(inner_max_degree == 0 || (inner_max_degree >= INNER_MIN_DEGREE && (inner_max_degree as u64) <= MAX_DEGREE), error::invalid_argument(EINVALID_CONFIG_PARAMETER));
+    assert!(leaf_max_degree == 0 || (leaf_max_degree >= LEAF_MIN_DEGREE && (leaf_max_degree as u64) <= MAX_DEGREE), error::invalid_argument(EINVALID_CONFIG_PARAMETER));
+
+    // Assert that storage_slots_allocator special indices are aligned:
+    assert!(storage_slots_allocator::is_null_index(NULL_INDEX), error::invalid_state(EINTERNAL_INVARIANT_BROKEN));
+    assert!(storage_slots_allocator::is_special_unused_index(ROOT_INDEX), error::invalid_state(EINTERNAL_INVARIANT_BROKEN));
+
+    let nodes = storage_slots_allocator::new(reuse_slots);
+
+    let self = BigOrderedMap::BPlusTreeMap {
+        root: new_node(/*is_leaf=*/true),
+        nodes: nodes,
+        min_leaf_index: ROOT_INDEX,
+        max_leaf_index: ROOT_INDEX,
+        constant_kv_size: false, // Will be initialized in validate_static_size_and_init_max_degrees below.
+        inner_max_degree: inner_max_degree,
+        leaf_max_degree: leaf_max_degree
+    };
+    self.validate_static_size_and_init_max_degrees();
+    self
+}
+
+ + + +
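+
+A hedged sizing sketch: with keys of at most ~40 bytes and entries of at most ~400 bytes, degrees of 1024 and 128 keep nodes comfortably under MAX_NODE_BYTES. The module, address, and concrete numbers below are illustrative only, and the import path assumes the module is available as aptos_framework::big_ordered_map:
+
+```move
+#[test_only]
+module 0xcafe::bom_config_example {
+    use aptos_framework::big_ordered_map;
+
+    #[test]
+    fun explicit_degrees() {
+        // Illustrative: ~40-byte keys * 1024 and ~400-byte entries * 128 both stay
+        // well under MAX_NODE_BYTES (409600).
+        let map = big_ordered_map::new_with_config<vector<u8>, vector<u8>>(1024, 128, false);
+        let key = b"key";
+        map.add(copy key, b"value");
+        assert!(map.contains(&key), 0);
+        map.destroy(|_v| {});
+    }
+}
+```
+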
+ + + +## Function `new_from` + +Create a BigOrderedMap from a vector of keys and values, with default configuration. +Aborts with EKEY_ALREADY_EXISTS if duplicate keys are passed in. + + +
public fun new_from<K: copy, drop, store, V: store>(keys: vector<K>, values: vector<V>): big_ordered_map::BigOrderedMap<K, V>
+
+ + + +
+Implementation + + +
public fun new_from<K: drop + copy + store, V: store>(keys: vector<K>, values: vector<V>): BigOrderedMap<K, V> {
+    let map = new();
+    map.add_all(keys, values);
+    map
+}
+
+ + + +
+ + + +## Function `destroy_empty` + +Destroys the map if it's empty, otherwise aborts. + + +
public fun destroy_empty<K: store, V: store>(self: big_ordered_map::BigOrderedMap<K, V>)
+
+ + + +
+Implementation + + +
public fun destroy_empty<K: store, V: store>(self: BigOrderedMap<K, V>) {
+    let BigOrderedMap::BPlusTreeMap { root, nodes, min_leaf_index: _, max_leaf_index: _, constant_kv_size: _, inner_max_degree: _, leaf_max_degree: _ } = self;
+    root.destroy_empty_node();
+    // If root node is empty, then we know that no storage slots are used,
+    // and so we can safely destroy all nodes.
+    nodes.destroy_empty();
+}
+
+ + + +
+ + + +## Function `allocate_spare_slots` + +If the map was created with reuse_slots=true, you can allocate spare slots to pay the storage fee now, so that +future insertions don't require any storage slot creation - making their gas more predictable +and better bounded/fair. +(otherwise, unlucky inserts create new storage slots and are charged more for it) + +
public fun allocate_spare_slots<K: store, V: store>(self: &mut big_ordered_map::BigOrderedMap<K, V>, num_to_allocate: u64)
+
+ + + +
+Implementation + + +
public fun allocate_spare_slots<K: store, V: store>(self: &mut BigOrderedMap<K, V>, num_to_allocate: u64) {
+    self.nodes.allocate_spare_slots(num_to_allocate)
+}
+
+ + + +
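+
+A sketch of preallocating slots so that later inserts don't need to create storage slots. The holder resource, module, address, and slot count below are illustrative only, and the import path assumes aptos_framework::big_ordered_map:
+
+```move
+module 0xcafe::spare_slots_example {
+    use aptos_framework::big_ordered_map::{Self, BigOrderedMap};
+
+    /// Illustrative resource holding the map.
+    struct Registry has key {
+        entries: BigOrderedMap<u64, u64>,
+    }
+
+    /// Pays for a few storage slots up front so later inserts have predictable gas.
+    public entry fun init(owner: &signer) {
+        let entries = big_ordered_map::new_with_reusable<u64, u64>();
+        entries.allocate_spare_slots(4);
+        move_to(owner, Registry { entries });
+    }
+}
+```
+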
+ + + +## Function `is_empty` + +Returns true iff the BigOrderedMap is empty. + + +
public fun is_empty<K: store, V: store>(self: &big_ordered_map::BigOrderedMap<K, V>): bool
+
+ + + +
+Implementation + + +
public fun is_empty<K: store, V: store>(self: &BigOrderedMap<K, V>): bool {
+    let node = self.borrow_node(self.min_leaf_index);
+    node.children.is_empty()
+}
+
+ + + +
+ + + +## Function `compute_length` + +Returns the number of elements in the BigOrderedMap. +This is an expensive function, as it goes through all the leaves to compute it. + + +
public fun compute_length<K: store, V: store>(self: &big_ordered_map::BigOrderedMap<K, V>): u64
+
+ + + +
+Implementation + + +
public fun compute_length<K: store, V: store>(self: &BigOrderedMap<K, V>): u64 {
+    let size = 0;
+    self.for_each_leaf_node_ref(|node| {
+        size += node.children.length();
+    });
+    size
+}
+
+ + + +
+ + + +## Function `add` + +Inserts the key/value into the BigOrderedMap. +Aborts if the key is already in the map. + + +
public fun add<K: copy, drop, store, V: store>(self: &mut big_ordered_map::BigOrderedMap<K, V>, key: K, value: V)
+
+ + + +
+Implementation + + +
public fun add<K: drop + copy + store, V: store>(self: &mut BigOrderedMap<K, V>, key: K, value: V) {
+    self.add_or_upsert_impl(key, value, false).destroy_none()
+}
+
+ + + +
+ + + +## Function `upsert` + +If the key doesn't exist in the map, inserts the key/value, and returns none. +Otherwise updates the value under the given key, and returns the old value. + + +
public fun upsert<K: copy, drop, store, V: store>(self: &mut big_ordered_map::BigOrderedMap<K, V>, key: K, value: V): option::Option<V>
+
+ + + +
+Implementation + + +
public fun upsert<K: drop + copy + store, V: store>(self: &mut BigOrderedMap<K, V>, key: K, value: V): Option<V> {
+    let result = self.add_or_upsert_impl(key, value, true);
+    if (result.is_some()) {
+        let Child::Leaf {
+            value: old_value,
+        } = result.destroy_some();
+        option::some(old_value)
+    } else {
+        result.destroy_none();
+        option::none()
+    }
+}
+
+ + + +
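+
+A short sketch contrasting add and upsert (hypothetical test module and address; import path assumed as above):
+
+```move
+#[test_only]
+module 0xcafe::upsert_example {
+    use std::option;
+    use aptos_framework::big_ordered_map;
+
+    #[test]
+    fun add_then_upsert() {
+        let map = big_ordered_map::new<u64, u64>();
+        map.add(1, 10);                // aborts if the key already exists
+        let old = map.upsert(1, 11);   // existing key: value replaced, old one returned
+        assert!(old == option::some(10), 0);
+        let fresh = map.upsert(2, 20); // new key: inserted, none returned
+        assert!(fresh.is_none(), 0);
+        map.destroy(|_v| {});
+    }
+}
+```
+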
+ + + +## Function `remove` + +Removes the entry from the BigOrderedMap and returns the value the key maps to. +Aborts if there is no entry for the key. + +
public fun remove<K: copy, drop, store, V: store>(self: &mut big_ordered_map::BigOrderedMap<K, V>, key: &K): V
+
+ + + +
+Implementation + + +
public fun remove<K: drop + copy + store, V: store>(self: &mut BigOrderedMap<K, V>, key: &K): V {
+    // Optimize case where only root node exists
+    // (optimizes out borrowing and path creation in `find_leaf_path`)
+    if (self.root.is_leaf) {
+        let Child::Leaf {
+            value,
+        } = self.root.children.remove(key);
+        return value;
+    };
+
+    let path_to_leaf = self.find_leaf_path(key);
+
+    assert!(!path_to_leaf.is_empty(), error::invalid_argument(EKEY_NOT_FOUND));
+
+    let Child::Leaf {
+        value,
+    } = self.remove_at(path_to_leaf, key);
+    value
+}
+
+ + + +
+ + + +## Function `add_all` + +Add multiple key/value pairs to the map. The keys must not already exist. +Aborts with EKEY_ALREADY_EXISTS if any key already exists, or if duplicate keys are passed in. + +
public fun add_all<K: copy, drop, store, V: store>(self: &mut big_ordered_map::BigOrderedMap<K, V>, keys: vector<K>, values: vector<V>)
+
+ + + +
+Implementation + + +
public fun add_all<K: drop + copy + store, V: store>(self: &mut BigOrderedMap<K, V>, keys: vector<K>, values: vector<V>) {
+    // TODO: Can be optimized, both in insertion order (largest first, then from smallest),
+    // as well as on initializing inner_max_degree/leaf_max_degree better
+    keys.zip(values, |key, value| {
+        self.add(key, value);
+    });
+}
+
+ + + +
+ + + +## Function `pop_front` + + + +
public fun pop_front<K: copy, drop, store, V: store>(self: &mut big_ordered_map::BigOrderedMap<K, V>): (K, V)
+
+ + + +
+Implementation + + +
public fun pop_front<K: drop + copy + store, V: store>(self: &mut BigOrderedMap<K, V>): (K, V) {
+    let it = self.new_begin_iter();
+    let k = *it.iter_borrow_key();
+    let v = self.remove(&k);
+    (k, v)
+}
+
+ + + +
+ + + +## Function `pop_back` + + + +
public fun pop_back<K: copy, drop, store, V: store>(self: &mut big_ordered_map::BigOrderedMap<K, V>): (K, V)
+
+ + + +
+Implementation + + +
public fun pop_back<K: drop + copy + store, V: store>(self: &mut BigOrderedMap<K, V>): (K, V) {
+    let it = self.new_end_iter().iter_prev(self);
+    let k = *it.iter_borrow_key();
+    let v = self.remove(&k);
+    (k, v)
+}
+
+ + + +
+ + + +## Function `lower_bound` + +Returns an iterator pointing to the first element that is greater or equal to the provided +key, or an end iterator if such element doesn't exist. + + +
public(friend) fun lower_bound<K: copy, drop, store, V: store>(self: &big_ordered_map::BigOrderedMap<K, V>, key: &K): big_ordered_map::IteratorPtr<K>
+
+ + + +
+Implementation + + +
public(friend) fun lower_bound<K: drop + copy + store, V: store>(self: &BigOrderedMap<K, V>, key: &K): IteratorPtr<K> {
+    let leaf = self.find_leaf(key);
+    if (leaf == NULL_INDEX) {
+        return self.new_end_iter()
+    };
+
+    let node = self.borrow_node(leaf);
+    assert!(node.is_leaf, error::invalid_state(EINTERNAL_INVARIANT_BROKEN));
+
+    let child_lower_bound = node.children.lower_bound(key);
+    if (child_lower_bound.iter_is_end(&node.children)) {
+        self.new_end_iter()
+    } else {
+        let iter_key = *child_lower_bound.iter_borrow_key(&node.children);
+        new_iter(leaf, child_lower_bound, iter_key)
+    }
+}
+
+ + + +
+ + + +## Function `find` + +Returns an iterator pointing to the element that equals to the provided key, or an end +iterator if the key is not found. + + +
public(friend) fun find<K: copy, drop, store, V: store>(self: &big_ordered_map::BigOrderedMap<K, V>, key: &K): big_ordered_map::IteratorPtr<K>
+
+ + + +
+Implementation + + +
public(friend) fun find<K: drop + copy + store, V: store>(self: &BigOrderedMap<K, V>, key: &K): IteratorPtr<K> {
+    let lower_bound = self.lower_bound(key);
+    if (lower_bound.iter_is_end(self)) {
+        lower_bound
+    } else if (&lower_bound.key == key) {
+        lower_bound
+    } else {
+        self.new_end_iter()
+    }
+}
+
+ + + +
+ + + +## Function `contains` + +Returns true iff the key exists in the map. + + +
public fun contains<K: copy, drop, store, V: store>(self: &big_ordered_map::BigOrderedMap<K, V>, key: &K): bool
+
+ + + +
+Implementation + + +
public fun contains<K: drop + copy + store, V: store>(self: &BigOrderedMap<K, V>, key: &K): bool {
+    let lower_bound = self.lower_bound(key);
+    if (lower_bound.iter_is_end(self)) {
+        false
+    } else if (&lower_bound.key == key) {
+        true
+    } else {
+        false
+    }
+}
+
+ + + +
+ + + +## Function `borrow` + +Returns a reference to the value mapped to the given key; aborts if the key is not found. + +
public fun borrow<K: copy, drop, store, V: store>(self: &big_ordered_map::BigOrderedMap<K, V>, key: &K): &V
+
+ + + +
+Implementation + + +
public fun borrow<K: drop + copy + store, V: store>(self: &BigOrderedMap<K, V>, key: &K): &V {
+    let iter = self.find(key);
+    assert!(!iter.iter_is_end(self), error::invalid_argument(EKEY_NOT_FOUND));
+
+    iter.iter_borrow(self)
+}
+
+ + + +
+ + + +## Function `get` + + + +
public fun get<K: copy, drop, store, V: copy, store>(self: &big_ordered_map::BigOrderedMap<K, V>, key: &K): option::Option<V>
+
+ + + +
+Implementation + + +
public fun get<K: drop + copy + store, V: copy + store>(self: &BigOrderedMap<K, V>, key: &K): Option<V> {
+    let iter = self.find(key);
+    if (iter.iter_is_end(self)) {
+        option::none()
+    } else {
+        option::some(*iter.iter_borrow(self))
+    }
+}
+
+ + + +
+ + + +## Function `borrow_mut` + +Returns a mutable reference to the value mapped to the given key; aborts if the key is not found. +Aborts with EBORROW_MUT_REQUIRES_CONSTANT_VALUE_SIZE if the key/value types don't have constant serialized size, +because otherwise we cannot assert invariants on the size. +In case of variable size, use either borrow, copy then upsert, or remove and add instead of mutable borrow. + +
public fun borrow_mut<K: copy, drop, store, V: store>(self: &mut big_ordered_map::BigOrderedMap<K, V>, key: &K): &mut V
+
+ + + +
+Implementation + + +
public fun borrow_mut<K: drop + copy + store, V: store>(self: &mut BigOrderedMap<K, V>, key: &K): &mut V {
+    let iter = self.find(key);
+    assert!(!iter.iter_is_end(self), error::invalid_argument(EKEY_NOT_FOUND));
+    iter.iter_borrow_mut(self)
+}
+
+ + + +
+ + + +## Function `borrow_front` + + + +
public fun borrow_front<K: copy, drop, store, V: store>(self: &big_ordered_map::BigOrderedMap<K, V>): (K, &V)
+
+ + + +
+Implementation + + +
public fun borrow_front<K: drop + copy + store, V: store>(self: &BigOrderedMap<K, V>): (K, &V) {
+    let it = self.new_begin_iter();
+    let key = *it.iter_borrow_key();
+    (key, it.iter_borrow(self))
+}
+
+ + + +
+ + + +## Function `borrow_back` + + + +
public fun borrow_back<K: copy, drop, store, V: store>(self: &big_ordered_map::BigOrderedMap<K, V>): (K, &V)
+
+ + + +
+Implementation + + +
public fun borrow_back<K: drop + copy + store, V: store>(self: &BigOrderedMap<K, V>): (K, &V) {
+    let it = self.new_end_iter().iter_prev(self);
+    let key = *it.iter_borrow_key();
+    (key, it.iter_borrow(self))
+}
+
+ + + +
+ + + +## Function `prev_key` + + + +
public fun prev_key<K: copy, drop, store, V: store>(self: &big_ordered_map::BigOrderedMap<K, V>, key: &K): option::Option<K>
+
+ + + +
+Implementation + + +
public fun prev_key<K: drop + copy + store, V: store>(self: &BigOrderedMap<K, V>, key: &K): Option<K> {
+    let it = self.lower_bound(key);
+    if (it.iter_is_begin(self)) {
+        option::none()
+    } else {
+        option::some(*it.iter_prev(self).iter_borrow_key())
+    }
+}
+
+ + + +
+ + + +## Function `next_key` + + + +
public fun next_key<K: copy, drop, store, V: store>(self: &big_ordered_map::BigOrderedMap<K, V>, key: &K): option::Option<K>
+
+ + + +
+Implementation + + +
public fun next_key<K: drop + copy + store, V: store>(self: &BigOrderedMap<K, V>, key: &K): Option<K> {
+    let it = self.lower_bound(key);
+    if (it.iter_is_end(self)) {
+        option::none()
+    } else {
+        let cur_key = it.iter_borrow_key();
+        if (key == cur_key) {
+            let it = it.iter_next(self);
+            if (it.iter_is_end(self)) {
+                option::none()
+            } else {
+                option::some(*it.iter_borrow_key())
+            }
+        } else {
+            option::some(*cur_key)
+        }
+    }
+}
+
+ + + +
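+
+Since the iterator API is still public(friend), a public way to walk the map in key order is borrow_front plus next_key, as sketched below (hypothetical test module and address; import path assumed as above):
+
+```move
+#[test_only]
+module 0xcafe::walk_example {
+    use aptos_framework::big_ordered_map;
+
+    #[test]
+    fun walk_keys_in_order() {
+        let map = big_ordered_map::new_from(vector[3u64, 1, 2], vector[30u64, 10, 20]);
+        let (k, first) = map.borrow_front();   // smallest key and its value
+        let sum = *first;
+        let next = map.next_key(&k);
+        while (next.is_some()) {
+            let k = next.destroy_some();
+            sum = sum + *map.borrow(&k);
+            next = map.next_key(&k);
+        };
+        assert!(sum == 60, 0);
+        map.destroy(|_v| {});
+    }
+}
+```
+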
+ + + +## Function `to_ordered_map` + +Converts a BigOrderedMap to an OrderedMap; meant mostly for view functions that need an atomic +view of the whole map. +Disclaimer: This function may be costly as the BigOrderedMap may be huge in size. Use it at your own discretion. + +
public fun to_ordered_map<K: copy, drop, store, V: copy, store>(self: &big_ordered_map::BigOrderedMap<K, V>): ordered_map::OrderedMap<K, V>
+
+ + + +
+Implementation + + +
public fun to_ordered_map<K: drop + copy + store, V: copy + store>(self: &BigOrderedMap<K, V>): OrderedMap<K, V> {
+    let result = ordered_map::new();
+    self.for_each_ref_friend(|k, v| {
+        result.new_end_iter().iter_add(&mut result, *k, *v);
+    });
+    result
+}
+
+ + + +
+ + + +## Function `keys` + +Get all keys. + +For a large enough BigOrderedMap this function will fail due to execution gas limits; +use an iterator or next_key/prev_key to iterate over a portion of the map at a time. + +
public fun keys<K: copy, drop, store, V: copy, store>(self: &big_ordered_map::BigOrderedMap<K, V>): vector<K>
+
+ + + +
+Implementation + + +
public fun keys<K: store + copy + drop, V: store + copy>(self: &BigOrderedMap<K, V>): vector<K> {
+    let result = vector[];
+    self.for_each_ref_friend(|k, _v| {
+        result.push_back(*k);
+    });
+    result
+}
+
+ + + +
+ + + +## Function `for_each_and_clear` + +Apply the function to each element in the map, consuming it, leaving the map empty. + +The current implementation is O(n * log(n)). Once function values are enabled, this will be optimized +to O(n). + +
public fun for_each_and_clear<K: copy, drop, store, V: store>(self: &mut big_ordered_map::BigOrderedMap<K, V>, f: |(K, V)|)
+
+ + + +
+Implementation + + +
public inline fun for_each_and_clear<K: drop + copy + store, V: store>(self: &mut BigOrderedMap<K, V>, f: |K, V|) {
+    // TODO - this can be done more efficiently, by destroying the leaves directly
+    // but that requires more complicated code and testing.
+    while (!self.is_empty()) {
+        let (k, v) = self.pop_front();
+        f(k, v);
+    };
+}
+
+ + + +
+ + + +## Function `for_each` + +Apply the function to each element in the map, consuming it, and consuming the map. + +The current implementation is O(n * log(n)). Once function values are enabled, this will be optimized +to O(n). + +
public fun for_each<K: copy, drop, store, V: store>(self: big_ordered_map::BigOrderedMap<K, V>, f: |(K, V)|)
+
+ + + +
+Implementation + + +
public inline fun for_each<K: drop + copy + store, V: store>(self: BigOrderedMap<K, V>, f: |K, V|) {
+    // TODO - this can be done more efficiently, by destroying the leaves directly
+    // but that requires more complicated code and testing.
+    self.for_each_and_clear(|k, v| f(k, v));
+    self.destroy_empty()
+}
+
+ + + +
+ + + +## Function `for_each_ref` + +Apply the function to a reference of each element in the map. + +The current implementation is O(n * log(n)). Once function values are enabled, this will be optimized +to O(n). + +
public fun for_each_ref<K: copy, drop, store, V: store>(self: &big_ordered_map::BigOrderedMap<K, V>, f: |(&K, &V)|)
+
+ + + +
+Implementation + + +
public inline fun for_each_ref<K: drop + copy + store, V: store>(self: &BigOrderedMap<K, V>, f: |&K, &V|) {
+    // This implementation is inefficient: O(log(n)) for next_key / borrow lookups every time,
+    // but is the only one available through the public API.
+    if (!self.is_empty()) {
+        let (k, v) = self.borrow_front();
+        f(&k, v);
+
+        let cur_k = self.next_key(&k);
+        while (cur_k.is_some()) {
+            let k = cur_k.destroy_some();
+            f(&k, self.borrow(&k));
+
+            cur_k = self.next_key(&k);
+        };
+    };
+
+    // TODO use this more efficient implementation when function values are enabled.
+    // self.for_each_leaf_node_ref(|node| {
+    //     node.children.for_each_ref(|k: &K, v: &Child<V>| {
+    //         f(k, &v.value);
+    //     });
+    // })
+}
+
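+A short sketch (illustrative; `map` is assumed to be a &BigOrderedMap<u64, u64>): summing all
+values with for_each_ref. Since the function is inline, the lambda can read and update the
+enclosing local.
+
+let total = 0;
+map.for_each_ref(|_k, v| {
+    total = total + *v;
+});
+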
+ + + +
+ + + +## Function `for_each_ref_friend` + + + +
public(friend) fun for_each_ref_friend<K: copy, drop, store, V: store>(self: &big_ordered_map::BigOrderedMap<K, V>, f: |(&K, &V)|)
+
+ + + +
+Implementation + + +
public(friend) inline fun for_each_ref_friend<K: drop + copy + store, V: store>(self: &BigOrderedMap<K, V>, f: |&K, &V|) {
+    self.for_each_leaf_node_ref(|node| {
+        node.children.for_each_ref_friend(|k: &K, v: &Child<V>| {
+            f(k, &v.value);
+        });
+    })
+}
+
+ + + +
+ + + +## Function `for_each_mut` + +Apply the function to a mutable reference of each key-value pair in the map. + +Current implementation is O(n * log(n)). Once function values are fully supported, it will be optimized +to O(n). + + +
public fun for_each_mut<K: copy, drop, store, V: store>(self: &mut big_ordered_map::BigOrderedMap<K, V>, f: |(&K, &mut V)|)
+
+ + + +
+Implementation + + +
public inline fun for_each_mut<K: copy + drop + store, V: store>(self: &mut BigOrderedMap<K, V>, f: |&K, &mut V|) {
+    // This implementation is inefficient: O(log(n)) for next_key / borrow lookups every time,
+    // but is the only one available through the public API.
+    if (!self.is_empty()) {
+        let (k, _v) = self.borrow_front();
+
+        let done = false;
+        while (!done) {
+            f(&k, self.borrow_mut(&k));
+
+            let cur_k = self.next_key(&k);
+            if (cur_k.is_some()) {
+                k = cur_k.destroy_some();
+            } else {
+                done = true;
+            }
+        };
+    };
+
+    // TODO: if we make iterator api public update to:
+    // let iter = self.new_begin_iter();
+    // while (!iter.iter_is_end(self)) {
+    //     let key = *iter.iter_borrow_key(self);
+    //     f(key, iter.iter_borrow_mut(self));
+    //     iter = iter.iter_next(self);
+    // }
+}
+
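+A short sketch (illustrative; `map` is assumed to be a &mut BigOrderedMap<u64, u64>, and u64
+values have a constant serialized size, as mutable borrows require): incrementing every value in place.
+
+map.for_each_mut(|_k, v| {
+    *v = *v + 1;
+});
+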
+ + + +
+ + + +## Function `destroy` + +Destroy a map by destroying its elements individually. + +Current implementation is O(n * log(n)). Once function values are fully supported, it will be optimized +to O(n). + + +
public fun destroy<K: copy, drop, store, V: store>(self: big_ordered_map::BigOrderedMap<K, V>, dv: |V|)
+
+ + + +
+Implementation + + +
public inline fun destroy<K: drop + copy + store, V: store>(self: BigOrderedMap<K, V>, dv: |V|) {
+    self.for_each(|_k, v| {
+        dv(v);
+    });
+}
+
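+A small sketch (illustrative; `Ticket` is a hypothetical value type without the drop ability,
+defined in the same module as this snippet): destroy takes a lambda that disposes of each value.
+
+// struct Ticket has store { id: u64 }
+map.destroy(|ticket| {
+    let Ticket { id: _ } = ticket; // explicitly unpack, since Ticket cannot be silently dropped
+});
+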
+ + + +
+ + + +## Function `new_begin_iter` + +Returns the begin iterator. + + +
public(friend) fun new_begin_iter<K: copy, store, V: store>(self: &big_ordered_map::BigOrderedMap<K, V>): big_ordered_map::IteratorPtr<K>
+
+ + + +
+Implementation + + +
public(friend) fun new_begin_iter<K: copy + store, V: store>(self: &BigOrderedMap<K, V>): IteratorPtr<K> {
+    if (self.is_empty()) {
+        return IteratorPtr::End;
+    };
+
+    let node = self.borrow_node(self.min_leaf_index);
+    assert!(!node.children.is_empty(), error::invalid_state(EINTERNAL_INVARIANT_BROKEN));
+    let begin_child_iter = node.children.new_begin_iter();
+    let begin_child_key = *begin_child_iter.iter_borrow_key(&node.children);
+    new_iter(self.min_leaf_index, begin_child_iter, begin_child_key)
+}
+
+ + + +
+ + + +## Function `new_end_iter` + +Returns the end iterator. + + +
public(friend) fun new_end_iter<K: copy, store, V: store>(self: &big_ordered_map::BigOrderedMap<K, V>): big_ordered_map::IteratorPtr<K>
+
+ + + +
+Implementation + + +
public(friend) fun new_end_iter<K: copy + store, V: store>(self: &BigOrderedMap<K, V>): IteratorPtr<K> {
+    IteratorPtr::End
+}
+
+ + + +
+ + + +## Function `iter_is_begin` + + + +
public(friend) fun iter_is_begin<K: store, V: store>(self: &big_ordered_map::IteratorPtr<K>, map: &big_ordered_map::BigOrderedMap<K, V>): bool
+
+ + + +
+Implementation + + +
public(friend) fun iter_is_begin<K: store, V: store>(self: &IteratorPtr<K>, map: &BigOrderedMap<K, V>): bool {
+    if (self is IteratorPtr::End<K>) {
+        map.is_empty()
+    } else {
+        (self.node_index == map.min_leaf_index && self.child_iter.iter_is_begin_from_non_empty())
+    }
+}
+
+ + + +
+ + + +## Function `iter_is_end` + + + +
public(friend) fun iter_is_end<K: store, V: store>(self: &big_ordered_map::IteratorPtr<K>, _map: &big_ordered_map::BigOrderedMap<K, V>): bool
+
+ + + +
+Implementation + + +
public(friend) fun iter_is_end<K: store, V: store>(self: &IteratorPtr<K>, _map: &BigOrderedMap<K, V>): bool {
+    self is IteratorPtr::End<K>
+}
+
+ + + +
+ + + +## Function `iter_borrow_key` + +Borrows the key the given iterator points to. +Aborts with EITER_OUT_OF_BOUNDS if the iterator is pointing to the end. +Note: Requires that the map is not changed after the input iterator is generated. + + +
public(friend) fun iter_borrow_key<K>(self: &big_ordered_map::IteratorPtr<K>): &K
+
+ + + +
+Implementation + + +
public(friend) fun iter_borrow_key<K>(self: &IteratorPtr<K>): &K {
+    assert!(!(self is IteratorPtr::End<K>), error::invalid_argument(EITER_OUT_OF_BOUNDS));
+    &self.key
+}
+
+ + + +
+ + + +## Function `iter_borrow` + +Borrows the value the given iterator points to. +Aborts with EITER_OUT_OF_BOUNDS if the iterator is pointing to the end. +Note: Requires that the map is not changed after the input iterator is generated. + + +
public(friend) fun iter_borrow<K: drop, store, V: store>(self: big_ordered_map::IteratorPtr<K>, map: &big_ordered_map::BigOrderedMap<K, V>): &V
+
+ + + +
+Implementation + + +
public(friend) fun iter_borrow<K: drop + store, V: store>(self: IteratorPtr<K>, map: &BigOrderedMap<K, V>): &V {
+    assert!(!self.iter_is_end(map), error::invalid_argument(EITER_OUT_OF_BOUNDS));
+    let IteratorPtr::Some { node_index, child_iter, key: _ } = self;
+    let children = &map.borrow_node(node_index).children;
+    &child_iter.iter_borrow(children).value
+}
+
+ + + +
+ + + +## Function `iter_borrow_mut` + +Mutably borrows the value the given iterator points to. +Aborts with EITER_OUT_OF_BOUNDS if the iterator is pointing to the end. +Aborts with EBORROW_MUT_REQUIRES_CONSTANT_VALUE_SIZE if the KV pair doesn't have a constant serialized size, +because then we cannot assert invariants on the size. +For variable-size values, use either borrow, copy, then upsert, or remove and add, instead of a mutable borrow. + +Note: Requires that the map is not changed after the input iterator is generated. + + +
public(friend) fun iter_borrow_mut<K: drop, store, V: store>(self: big_ordered_map::IteratorPtr<K>, map: &mut big_ordered_map::BigOrderedMap<K, V>): &mut V
+
+ + + +
+Implementation + + +
public(friend) fun iter_borrow_mut<K: drop + store, V: store>(self: IteratorPtr<K>, map: &mut BigOrderedMap<K, V>): &mut V {
+    assert!(map.constant_kv_size || bcs::constant_serialized_size<V>().is_some(), error::invalid_argument(EBORROW_MUT_REQUIRES_CONSTANT_VALUE_SIZE));
+    assert!(!self.iter_is_end(map), error::invalid_argument(EITER_OUT_OF_BOUNDS));
+    let IteratorPtr::Some { node_index, child_iter, key: _ } = self;
+    let children = &mut map.borrow_node_mut(node_index).children;
+    &mut child_iter.iter_borrow_mut(children).value
+}
+
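+When the value type does not have a constant serialized size, the mutable borrow above aborts;
+a sketch of the "borrow, copy, then upsert" alternative mentioned in the doc comment (assumes a
+map variable `map`, a copyable value type, and a hypothetical helper `mutate`):
+
+let v = *map.borrow(&key);       // copy the current value out
+mutate(&mut v);                  // change the copy (mutate is an assumed helper, not part of this module)
+let _prev = map.upsert(key, v);  // write it back, replacing the previous value
+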
+ + + +
+ + + +## Function `iter_next` + +Returns the next iterator. +Aborts with EITER_OUT_OF_BOUNDS if iterator is pointing to the end. +Requires the map is not changed after the input iterator is generated. + + +
public(friend) fun iter_next<K: copy, drop, store, V: store>(self: big_ordered_map::IteratorPtr<K>, map: &big_ordered_map::BigOrderedMap<K, V>): big_ordered_map::IteratorPtr<K>
+
+ + + +
+Implementation + + +
public(friend) fun iter_next<K: drop + copy + store, V: store>(self: IteratorPtr<K>, map: &BigOrderedMap<K, V>): IteratorPtr<K> {
+    assert!(!(self is IteratorPtr::End<K>), error::invalid_argument(EITER_OUT_OF_BOUNDS));
+
+    let node_index = self.node_index;
+    let node = map.borrow_node(node_index);
+
+    let child_iter = self.child_iter.iter_next(&node.children);
+    if (!child_iter.iter_is_end(&node.children)) {
+        // next is in the same leaf node
+        let iter_key = *child_iter.iter_borrow_key(&node.children);
+        return new_iter(node_index, child_iter, iter_key);
+    };
+
+    // next is in a different leaf node
+    let next_index = node.next;
+    if (next_index != NULL_INDEX) {
+        let next_node = map.borrow_node(next_index);
+
+        let child_iter = next_node.children.new_begin_iter();
+        assert!(!child_iter.iter_is_end(&next_node.children), error::invalid_state(EINTERNAL_INVARIANT_BROKEN));
+        let iter_key = *child_iter.iter_borrow_key(&next_node.children);
+        return new_iter(next_index, child_iter, iter_key);
+    };
+
+    map.new_end_iter()
+}
+
+ + + +
+ + + +## Function `iter_prev` + +Returns the previous iterator. +Aborts with EITER_OUT_OF_BOUNDS if iterator is pointing to the beginning. +Requires the map is not changed after the input iterator is generated. + + +
public(friend) fun iter_prev<K: copy, drop, store, V: store>(self: big_ordered_map::IteratorPtr<K>, map: &big_ordered_map::BigOrderedMap<K, V>): big_ordered_map::IteratorPtr<K>
+
+ + + +
+Implementation + + +
public(friend) fun iter_prev<K: drop + copy + store, V: store>(self: IteratorPtr<K>, map: &BigOrderedMap<K, V>): IteratorPtr<K> {
+    let prev_index = if (self is IteratorPtr::End<K>) {
+        map.max_leaf_index
+    } else {
+        let node_index = self.node_index;
+        let node = map.borrow_node(node_index);
+
+        if (!self.child_iter.iter_is_begin(&node.children)) {
+            // prev is in the same leaf node
+            let child_iter = self.child_iter.iter_prev(&node.children);
+            let key = *child_iter.iter_borrow_key(&node.children);
+            return new_iter(node_index, child_iter, key);
+        };
+        node.prev
+    };
+
+    assert!(prev_index != NULL_INDEX, error::invalid_argument(EITER_OUT_OF_BOUNDS));
+
+    // prev is in a different leaf node
+    let prev_node = map.borrow_node(prev_index);
+
+    let prev_children = &prev_node.children;
+    let child_iter = prev_children.new_end_iter().iter_prev(prev_children);
+    let iter_key = *child_iter.iter_borrow_key(prev_children);
+    new_iter(prev_index, child_iter, iter_key)
+}
+
+ + + +
+ + + +## Function `for_each_leaf_node_ref` + + + +
fun for_each_leaf_node_ref<K: store, V: store>(self: &big_ordered_map::BigOrderedMap<K, V>, f: |&big_ordered_map::Node<K, V>|)
+
+ + + +
+Implementation + + +
inline fun for_each_leaf_node_ref<K: store, V: store>(self: &BigOrderedMap<K, V>, f: |&Node<K, V>|) {
+    let cur_node_index = self.min_leaf_index;
+
+    while (cur_node_index != NULL_INDEX) {
+        let node = self.borrow_node(cur_node_index);
+        f(node);
+        cur_node_index = node.next;
+    }
+}
+
+ + + +
+ + + +## Function `borrow_node` + +Borrow a node, given an index. Works for both the root (i.e. inline) node and separately stored nodes. + + +
fun borrow_node<K: store, V: store>(self: &big_ordered_map::BigOrderedMap<K, V>, node_index: u64): &big_ordered_map::Node<K, V>
+
+ + + +
+Implementation + + +
inline fun borrow_node<K: store, V: store>(self: &BigOrderedMap<K, V>, node_index: u64): &Node<K, V> {
+    if (node_index == ROOT_INDEX) {
+        &self.root
+    } else {
+        self.nodes.borrow(node_index)
+    }
+}
+
+ + + +
+ + + +## Function `borrow_node_mut` + +Borrow a node mutably, given an index. Works for both the root (i.e. inline) node and separately stored nodes. + + +
fun borrow_node_mut<K: store, V: store>(self: &mut big_ordered_map::BigOrderedMap<K, V>, node_index: u64): &mut big_ordered_map::Node<K, V>
+
+ + + +
+Implementation + + +
inline fun borrow_node_mut<K: store, V: store>(self: &mut BigOrderedMap<K, V>, node_index: u64): &mut Node<K, V> {
+    if (node_index == ROOT_INDEX) {
+        &mut self.root
+    } else {
+        self.nodes.borrow_mut(node_index)
+    }
+}
+
+ + + +
+ + + +## Function `add_or_upsert_impl` + + + +
fun add_or_upsert_impl<K: copy, drop, store, V: store>(self: &mut big_ordered_map::BigOrderedMap<K, V>, key: K, value: V, allow_overwrite: bool): option::Option<big_ordered_map::Child<V>>
+
+ + + +
+Implementation + + +
fun add_or_upsert_impl<K: drop + copy + store, V: store>(self: &mut BigOrderedMap<K, V>, key: K, value: V, allow_overwrite: bool): Option<Child<V>> {
+    if (!self.constant_kv_size) {
+        self.validate_dynamic_size_and_init_max_degrees(&key, &value);
+    };
+
+    // Optimize case where only root node exists
+    // (optimizes out borrowing and path creation in `find_leaf_path`)
+    if (self.root.is_leaf) {
+        let children = &mut self.root.children;
+        let degree = children.length();
+
+        if (degree < (self.leaf_max_degree as u64)) {
+            let result = children.upsert(key, new_leaf_child(value));
+            assert!(allow_overwrite || result.is_none(), error::invalid_argument(EKEY_ALREADY_EXISTS));
+            return result;
+        };
+    };
+
+    let path_to_leaf = self.find_leaf_path(&key);
+
+    if (path_to_leaf.is_empty()) {
+        // In this case, the key is greater than all keys in the map.
+        // So we need to update `key` in the pointers to the last (rightmost) child
+        // on every level, to maintain the invariant of `add_at`
+        // we also create a path_to_leaf to the rightmost leaf.
+        let current = ROOT_INDEX;
+
+        loop {
+            path_to_leaf.push_back(current);
+
+            let current_node = self.borrow_node_mut(current);
+            if (current_node.is_leaf) {
+                break;
+            };
+            let last_value = current_node.children.new_end_iter().iter_prev(&current_node.children).iter_remove(&mut current_node.children);
+            current = last_value.node_index.stored_to_index();
+            current_node.children.add(key, last_value);
+        };
+    };
+
+    self.add_at(path_to_leaf, key, new_leaf_child(value), allow_overwrite)
+}
+
+ + + +
+ + + +## Function `validate_dynamic_size_and_init_max_degrees` + + + +
fun validate_dynamic_size_and_init_max_degrees<K: store, V: store>(self: &mut big_ordered_map::BigOrderedMap<K, V>, key: &K, value: &V)
+
+ + + +
+Implementation + + +
fun validate_dynamic_size_and_init_max_degrees<K: store, V: store>(self: &mut BigOrderedMap<K, V>, key: &K, value: &V) {
+    let key_size = bcs::serialized_size(key);
+    let value_size = bcs::serialized_size(value);
+    self.validate_size_and_init_max_degrees(key_size, value_size)
+}
+
+ + + +
+ + + +## Function `validate_static_size_and_init_max_degrees` + + + +
fun validate_static_size_and_init_max_degrees<K: store, V: store>(self: &mut big_ordered_map::BigOrderedMap<K, V>)
+
+ + + +
+Implementation + + +
fun validate_static_size_and_init_max_degrees<K: store, V: store>(self: &mut BigOrderedMap<K, V>) {
+    let key_size = bcs::constant_serialized_size<K>();
+    let value_size = bcs::constant_serialized_size<V>();
+
+    if (key_size.is_some()) {
+        let key_size = key_size.destroy_some();
+        if (self.inner_max_degree == 0) {
+            self.inner_max_degree = max(min(MAX_DEGREE, DEFAULT_TARGET_NODE_SIZE / key_size), INNER_MIN_DEGREE as u64) as u16;
+        };
+        assert!(key_size * (self.inner_max_degree as u64) <= MAX_NODE_BYTES, error::invalid_argument(EKEY_BYTES_TOO_LARGE));
+
+        if (value_size.is_some()) {
+            let value_size = value_size.destroy_some();
+            let entry_size = key_size + value_size;
+
+            if (self.leaf_max_degree == 0) {
+                self.leaf_max_degree = max(min(MAX_DEGREE, DEFAULT_TARGET_NODE_SIZE / entry_size), LEAF_MIN_DEGREE as u64) as u16;
+            };
+            assert!(entry_size * (self.leaf_max_degree as u64) <= MAX_NODE_BYTES, error::invalid_argument(EARGUMENT_BYTES_TOO_LARGE));
+
+            self.constant_kv_size = true;
+        };
+    }
+}
+
+ + + +
+ + + +## Function `validate_size_and_init_max_degrees` + + + +
fun validate_size_and_init_max_degrees<K: store, V: store>(self: &mut big_ordered_map::BigOrderedMap<K, V>, key_size: u64, value_size: u64)
+
+ + + +
+Implementation + + +
fun validate_size_and_init_max_degrees<K: store, V: store>(self: &mut BigOrderedMap<K, V>, key_size: u64, value_size: u64) {
+    let entry_size = key_size + value_size;
+
+    if (self.inner_max_degree == 0) {
+        let default_max_degree = min(MAX_DEGREE, MAX_NODE_BYTES / DEFAULT_MAX_KEY_OR_VALUE_SIZE);
+        self.inner_max_degree = max(min(default_max_degree, DEFAULT_TARGET_NODE_SIZE / key_size), INNER_MIN_DEGREE as u64) as u16;
+    };
+
+    if (self.leaf_max_degree == 0) {
+        let default_max_degree = min(MAX_DEGREE, MAX_NODE_BYTES / DEFAULT_MAX_KEY_OR_VALUE_SIZE / 2);
+        self.leaf_max_degree = max(min(default_max_degree, DEFAULT_TARGET_NODE_SIZE / entry_size), LEAF_MIN_DEGREE as u64) as u16;
+    };
+
+    // Make sure that no nodes can exceed the upper size limit.
+    assert!(key_size * (self.inner_max_degree as u64) <= MAX_NODE_BYTES, error::invalid_argument(EKEY_BYTES_TOO_LARGE));
+    assert!(entry_size * (self.leaf_max_degree as u64) <= MAX_NODE_BYTES, error::invalid_argument(EARGUMENT_BYTES_TOO_LARGE));
+}
+
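+A worked illustration of the degree selection above (the concrete numbers are assumptions for
+illustration only, not the module's actual constants; `default_inner_degree` and
+`default_leaf_degree` abbreviate the two `default_max_degree` values computed in the code):
+
+// Suppose key_size = 32 bytes and value_size = 96 bytes, so entry_size = 128.
+// Per the code above:
+//   inner_max_degree = max(min(default_inner_degree, DEFAULT_TARGET_NODE_SIZE / 32),  INNER_MIN_DEGREE)
+//   leaf_max_degree  = max(min(default_leaf_degree,  DEFAULT_TARGET_NODE_SIZE / 128), LEAF_MIN_DEGREE)
+// and the trailing asserts reject the configuration unless
+//   32 * inner_max_degree <= MAX_NODE_BYTES  and  128 * leaf_max_degree <= MAX_NODE_BYTES.
+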
+ + + +
+ + + +## Function `destroy_inner_child` + + + +
fun destroy_inner_child<V: store>(self: big_ordered_map::Child<V>): storage_slots_allocator::StoredSlot
+
+ + + +
+Implementation + + +
fun destroy_inner_child<V: store>(self: Child<V>): StoredSlot {
+    let Child::Inner {
+        node_index,
+    } = self;
+
+    node_index
+}
+
+ + + +
+ + + +## Function `destroy_empty_node` + + + +
fun destroy_empty_node<K: store, V: store>(self: big_ordered_map::Node<K, V>)
+
+ + + +
+Implementation + + +
fun destroy_empty_node<K: store, V: store>(self: Node<K, V>) {
+    let Node::V1 { children, is_leaf: _, prev: _, next: _ } = self;
+    assert!(children.is_empty(), error::invalid_argument(EMAP_NOT_EMPTY));
+    children.destroy_empty();
+}
+
+ + + +
+ + + +## Function `new_node` + + + +
fun new_node<K: store, V: store>(is_leaf: bool): big_ordered_map::Node<K, V>
+
+ + + +
+Implementation + + +
fun new_node<K: store, V: store>(is_leaf: bool): Node<K, V> {
+    Node::V1 {
+        is_leaf: is_leaf,
+        children: ordered_map::new(),
+        prev: NULL_INDEX,
+        next: NULL_INDEX,
+    }
+}
+
+ + + +
+ + + +## Function `new_node_with_children` + + + +
fun new_node_with_children<K: store, V: store>(is_leaf: bool, children: ordered_map::OrderedMap<K, big_ordered_map::Child<V>>): big_ordered_map::Node<K, V>
+
+ + + +
+Implementation + + +
fun new_node_with_children<K: store, V: store>(is_leaf: bool, children: OrderedMap<K, Child<V>>): Node<K, V> {
+    Node::V1 {
+        is_leaf: is_leaf,
+        children: children,
+        prev: NULL_INDEX,
+        next: NULL_INDEX,
+    }
+}
+
+ + + +
+ + + +## Function `new_inner_child` + + + +
fun new_inner_child<V: store>(node_index: storage_slots_allocator::StoredSlot): big_ordered_map::Child<V>
+
+ + + +
+Implementation + + +
fun new_inner_child<V: store>(node_index: StoredSlot): Child<V> {
+    Child::Inner {
+        node_index: node_index,
+    }
+}
+
+ + + +
+ + + +## Function `new_leaf_child` + + + +
fun new_leaf_child<V: store>(value: V): big_ordered_map::Child<V>
+
+ + + +
+Implementation + + +
fun new_leaf_child<V: store>(value: V): Child<V> {
+    Child::Leaf {
+        value: value,
+    }
+}
+
+ + + +
+ + + +## Function `new_iter` + + + +
fun new_iter<K>(node_index: u64, child_iter: ordered_map::IteratorPtr, key: K): big_ordered_map::IteratorPtr<K>
+
+ + + +
+Implementation + + +
fun new_iter<K>(node_index: u64, child_iter: ordered_map::IteratorPtr, key: K): IteratorPtr<K> {
+    IteratorPtr::Some {
+        node_index: node_index,
+        child_iter: child_iter,
+        key: key,
+    }
+}
+
+ + + +
+ + + +## Function `find_leaf` + +Find the leaf where the given key would fall in, +i.e. the largest leaf with its max_key <= key. +Returns NULL_INDEX if key is larger than any key currently stored in the map. + + +
fun find_leaf<K: copy, drop, store, V: store>(self: &big_ordered_map::BigOrderedMap<K, V>, key: &K): u64
+
+ + + +
+Implementation + + +
fun find_leaf<K: drop + copy + store, V: store>(self: &BigOrderedMap<K, V>, key: &K): u64 {
+    let current = ROOT_INDEX;
+    loop {
+        let node = self.borrow_node(current);
+        if (node.is_leaf) {
+            return current;
+        };
+        let children = &node.children;
+        let child_iter = children.lower_bound(key);
+        if (child_iter.iter_is_end(children)) {
+            return NULL_INDEX;
+        } else {
+            current = child_iter.iter_borrow(children).node_index.stored_to_index();
+        };
+    }
+}
+
+ + + +
+ + + +## Function `find_leaf_path` + +Find the leaf where the given key would fall in, +i.e. the largest leaf with its max_key <= key. +Returns the path from the root to that leaf (including the leaf itself). +Returns an empty path if key is larger than any key currently stored in the map. + + +
fun find_leaf_path<K: copy, drop, store, V: store>(self: &big_ordered_map::BigOrderedMap<K, V>, key: &K): vector<u64>
+
+ + + +
+Implementation + + +
fun find_leaf_path<K: drop + copy + store, V: store>(self: &BigOrderedMap<K, V>, key: &K): vector<u64> {
+    let vec = vector::empty();
+
+    let current = ROOT_INDEX;
+    loop {
+        vec.push_back(current);
+
+        let node = self.borrow_node(current);
+        if (node.is_leaf) {
+            return vec;
+        };
+        let children = &node.children;
+        let child_iter = children.lower_bound(key);
+        if (child_iter.iter_is_end(children)) {
+            return vector::empty();
+        } else {
+            current = child_iter.iter_borrow(children).node_index.stored_to_index();
+        };
+    }
+}
+
+ + + +
+ + + +## Function `get_max_degree` + + + +
fun get_max_degree<K: store, V: store>(self: &big_ordered_map::BigOrderedMap<K, V>, leaf: bool): u64
+
+ + + +
+Implementation + + +
fun get_max_degree<K: store, V: store>(self: &BigOrderedMap<K, V>, leaf: bool): u64 {
+    if (leaf) {
+        self.leaf_max_degree as u64
+    } else {
+        self.inner_max_degree as u64
+    }
+}
+
+ + + +
+ + + +## Function `replace_root` + + + +
fun replace_root<K: store, V: store>(self: &mut big_ordered_map::BigOrderedMap<K, V>, new_root: big_ordered_map::Node<K, V>): big_ordered_map::Node<K, V>
+
+ + + +
+Implementation + + +
fun replace_root<K: store, V: store>(self: &mut BigOrderedMap<K, V>, new_root: Node<K, V>): Node<K, V> {
+    // TODO: once mem::replace is made public/released, update to:
+    // mem::replace(&mut self.root, new_root_node)
+
+    let root = &mut self.root;
+    let tmp_is_leaf = root.is_leaf;
+    root.is_leaf = new_root.is_leaf;
+    new_root.is_leaf = tmp_is_leaf;
+
+    assert!(root.prev == NULL_INDEX, error::invalid_state(EINTERNAL_INVARIANT_BROKEN));
+    assert!(root.next == NULL_INDEX, error::invalid_state(EINTERNAL_INVARIANT_BROKEN));
+    assert!(new_root.prev == NULL_INDEX, error::invalid_state(EINTERNAL_INVARIANT_BROKEN));
+    assert!(new_root.next == NULL_INDEX, error::invalid_state(EINTERNAL_INVARIANT_BROKEN));
+
+    // let tmp_prev = root.prev;
+    // root.prev = new_root.prev;
+    // new_root.prev = tmp_prev;
+
+    // let tmp_next = root.next;
+    // root.next = new_root.next;
+    // new_root.next = tmp_next;
+
+    let tmp_children = root.children.trim(0);
+    root.children.append_disjoint(new_root.children.trim(0));
+    new_root.children.append_disjoint(tmp_children);
+
+    new_root
+}
+
+ + + +
+ + + +## Function `add_at` + +Add a given child to a given node (the last in path_to_node), and update/rebalance the tree as necessary. +It is required that the key pointers to the child node along path_to_node are greater than or equal to the given key. +That means if we are adding a key larger than any currently in the map, we need +to update the key pointers along path_to_node to include it before calling this method. + +Returns the Child previously associated with the given key. +If allow_overwrite is not set, the function aborts if the key is already present. + + +
fun add_at<K: copy, drop, store, V: store>(self: &mut big_ordered_map::BigOrderedMap<K, V>, path_to_node: vector<u64>, key: K, child: big_ordered_map::Child<V>, allow_overwrite: bool): option::Option<big_ordered_map::Child<V>>
+
+ + + +
+Implementation + + +
fun add_at<K: drop + copy + store, V: store>(self: &mut BigOrderedMap<K, V>, path_to_node: vector<u64>, key: K, child: Child<V>, allow_overwrite: bool): Option<Child<V>> {
+    // The last node in the path is the one we need to add the child to.
+    let node_index = path_to_node.pop_back();
+    {
+        // First check if we can perform this operation, without changing structure of the tree (i.e. without adding any nodes).
+
+        // For that we can just borrow the single node
+        let node = self.borrow_node_mut(node_index);
+        let children = &mut node.children;
+        let degree = children.length();
+
+        // Compute directly, as we cannot use get_max_degree(), as self is already mutably borrowed.
+        let max_degree = if (node.is_leaf) {
+            self.leaf_max_degree as u64
+        } else {
+            self.inner_max_degree as u64
+        };
+
+        if (degree < max_degree) {
+            // Adding a child to a current node doesn't exceed the size, so we can just do that.
+            let old_child = children.upsert(key, child);
+
+            if (node.is_leaf) {
+                assert!(allow_overwrite || old_child.is_none(), error::invalid_argument(EKEY_ALREADY_EXISTS));
+                return old_child;
+            } else {
+                assert!(!allow_overwrite && old_child.is_none(), error::invalid_state(EINTERNAL_INVARIANT_BROKEN));
+                return old_child;
+            };
+        };
+
+        // If we cannot add more nodes without exceeding the size,
+        // but node with `key` already exists, we either need to replace or abort.
+        let iter = children.find(&key);
+        if (!iter.iter_is_end(children)) {
+            assert!(node.is_leaf, error::invalid_state(EINTERNAL_INVARIANT_BROKEN));
+            assert!(allow_overwrite, error::invalid_argument(EKEY_ALREADY_EXISTS));
+
+            return option::some(iter.iter_replace(children, child));
+        }
+    };
+
+    // # of children in the current node exceeds the threshold, need to split into two nodes.
+
+    // If we are at the root, we need to move root node to become a child and have a new root node,
+    // in order to be able to split the node on the level it is.
+    let (reserved_slot, node) = if (node_index == ROOT_INDEX) {
+        assert!(path_to_node.is_empty(), error::invalid_state(EINTERNAL_INVARIANT_BROKEN));
+
+        // Splitting root now, need to create a new root.
+        // Since root is stored directly in the resource, we will swap in the new node there.
+        let new_root_node = new_node<K, V>(/*is_leaf=*/false);
+
+        // Reserve a slot where the current root will be moved to.
+        let (replacement_node_slot, replacement_node_reserved_slot) = self.nodes.reserve_slot();
+
+        let max_key = {
+            let root_children = &self.root.children;
+            let max_key = *root_children.new_end_iter().iter_prev(root_children).iter_borrow_key(root_children);
+            // need to check if key is largest, as invariant is that "parent's pointers" have been updated,
+            // but key itself can be larger than all previous ones.
+            if (cmp::compare(&max_key, &key).is_lt()) {
+                max_key = key;
+            };
+            max_key
+        };
+        // The new root will start with a single child - the existing root (which will be at the replacement location).
+        new_root_node.children.add(max_key, new_inner_child(replacement_node_slot));
+        let node = self.replace_root(new_root_node);
+
+        // we moved the currently processing node one level down, so we need to update the path
+        path_to_node.push_back(ROOT_INDEX);
+
+        let replacement_index = replacement_node_reserved_slot.reserved_to_index();
+        if (node.is_leaf) {
+            // replacement node is the only leaf, so we update the pointers:
+            self.min_leaf_index = replacement_index;
+            self.max_leaf_index = replacement_index;
+        };
+        (replacement_node_reserved_slot, node)
+    } else {
+        // In order to work on multiple nodes at the same time, we cannot borrow_mut, and instead need to
+        // remove_and_reserve the existing node.
+        let (cur_node_reserved_slot, node) = self.nodes.remove_and_reserve(node_index);
+        (cur_node_reserved_slot, node)
+    };
+
+    // move node_index out of scope, to make sure we don't accidentally access it, as we are done with it.
+    // (i.e. we should be using `reserved_slot` instead).
+    move node_index;
+
+    // Now we can perform the split at the current level, as we know we are not at the root level.
+    assert!(!path_to_node.is_empty(), error::invalid_state(EINTERNAL_INVARIANT_BROKEN));
+
+    // Parent has a reference under max key to the current node, so existing index
+    // needs to be the right node.
+    // Since ordered_map::trim moves from the end (i.e. smaller keys stay),
+    // we are going to put the contents of the current node on the left side,
+    // and create a new right node.
+    // So if we had before (node_index, node), we will change that to end up having:
+    // (new_left_node_index, node trimmed off) and (node_index, new node with trimmed off children)
+    //
+    // So let's rename variables cleanly:
+    let right_node_reserved_slot = reserved_slot;
+    let left_node = node;
+
+    let is_leaf = left_node.is_leaf;
+    let left_children = &mut left_node.children;
+
+    let right_node_index = right_node_reserved_slot.reserved_to_index();
+    let left_next = &mut left_node.next;
+    let left_prev = &mut left_node.prev;
+
+    // Compute directly, as we cannot use get_max_degree(), as self is already mutably borrowed.
+    let max_degree = if (is_leaf) {
+        self.leaf_max_degree as u64
+    } else {
+        self.inner_max_degree as u64
+    };
+    // compute the target size for the left node:
+    let target_size = (max_degree + 1) / 2;
+
+    // Add child (which will exceed the size), and then trim off to create two sets of children of correct sizes.
+    left_children.add(key, child);
+    let right_node_children = left_children.trim(target_size);
+
+    assert!(left_children.length() <= max_degree, error::invalid_state(EINTERNAL_INVARIANT_BROKEN));
+    assert!(right_node_children.length() <= max_degree, error::invalid_state(EINTERNAL_INVARIANT_BROKEN));
+
+    let right_node = new_node_with_children(is_leaf, right_node_children);
+
+    let (left_node_slot, left_node_reserved_slot) = self.nodes.reserve_slot();
+    let left_node_index = left_node_slot.stored_to_index();
+
+    // right nodes next is the node that was next of the left (previous) node, and next of left node is the right node.
+    right_node.next = *left_next;
+    *left_next = right_node_index;
+
+    // right node's prev becomes current left node
+    right_node.prev = left_node_index;
+    // Since the previously used index is going to the right node, `prev` pointer of the next node is correct,
+    // and we need to update next pointer of the previous node (if exists)
+    if (*left_prev != NULL_INDEX) {
+        self.nodes.borrow_mut(*left_prev).next = left_node_index;
+        assert!(right_node_index != self.min_leaf_index, error::invalid_state(EINTERNAL_INVARIANT_BROKEN));
+    } else if (right_node_index == self.min_leaf_index) {
+        // Otherwise, we were the smallest node on the level; if this is the leaf level, update the pointer.
+        assert!(is_leaf, error::invalid_state(EINTERNAL_INVARIANT_BROKEN));
+        self.min_leaf_index = left_node_index;
+    };
+
+    // Largest left key is the split key.
+    let max_left_key = *left_children.new_end_iter().iter_prev(left_children).iter_borrow_key(left_children);
+
+    self.nodes.fill_reserved_slot(left_node_reserved_slot, left_node);
+    self.nodes.fill_reserved_slot(right_node_reserved_slot, right_node);
+
+    // Add new Child (i.e. pointer to the left node) in the parent.
+    self.add_at(path_to_node, max_left_key, new_inner_child(left_node_slot), false).destroy_none();
+    option::none()
+}
+
+ + + +
+ + + +## Function `update_key` + +Given a path to a node (excluding the node itself) that is currently stored under "old_key", update "old_key" to "new_key". + + +
fun update_key<K: copy, drop, store, V: store>(self: &mut big_ordered_map::BigOrderedMap<K, V>, path_to_node: vector<u64>, old_key: &K, new_key: K)
+
+ + + +
+Implementation + + +
fun update_key<K: drop + copy + store, V: store>(self: &mut BigOrderedMap<K, V>, path_to_node: vector<u64>, old_key: &K, new_key: K) {
+    while (!path_to_node.is_empty()) {
+        let node_index = path_to_node.pop_back();
+        let node = self.borrow_node_mut(node_index);
+        let children = &mut node.children;
+        children.replace_key_inplace(old_key, new_key);
+
+        // If we were not updating the largest child, we don't need to continue.
+        if (children.new_end_iter().iter_prev(children).iter_borrow_key(children) != &new_key) {
+            return
+        };
+    }
+}
+
+ + + +
+ + + +## Function `remove_at` + + + +
fun remove_at<K: copy, drop, store, V: store>(self: &mut big_ordered_map::BigOrderedMap<K, V>, path_to_node: vector<u64>, key: &K): big_ordered_map::Child<V>
+
+ + + +
+Implementation + + +
fun remove_at<K: drop + copy + store, V: store>(self: &mut BigOrderedMap<K, V>, path_to_node: vector<u64>, key: &K): Child<V> {
+    // The last node in the path is the one we need to remove the child from.
+    let node_index = path_to_node.pop_back();
+    let old_child = {
+        // First check if we can perform this operation, without changing structure of the tree (i.e. without rebalancing any nodes).
+
+        // For that we can just borrow the single node
+        let node = self.borrow_node_mut(node_index);
+
+        let children = &mut node.children;
+        let is_leaf = node.is_leaf;
+
+        let old_child = children.remove(key);
+        if (node_index == ROOT_INDEX) {
+            // If current node is root, lower limit of max_degree/2 nodes doesn't apply.
+            // So we can adjust internally
+
+            assert!(path_to_node.is_empty(), error::invalid_state(EINTERNAL_INVARIANT_BROKEN));
+
+            if (!is_leaf && children.length() == 1) {
+                // If root is not leaf, but has a single child, promote only child to root,
+                // and drop current root. Since root is stored directly in the resource, we
+                // "move" the child into the root.
+
+                let Child::Inner {
+                    node_index: inner_child_index,
+                } = children.new_end_iter().iter_prev(children).iter_remove(children);
+
+                let inner_child = self.nodes.remove(inner_child_index);
+                if (inner_child.is_leaf) {
+                    self.min_leaf_index = ROOT_INDEX;
+                    self.max_leaf_index = ROOT_INDEX;
+                };
+
+                self.replace_root(inner_child).destroy_empty_node();
+            };
+            return old_child;
+        };
+
+        // Compute directly, as we cannot use get_max_degree(), as self is already mutably borrowed.
+        let max_degree = if (is_leaf) {
+            self.leaf_max_degree as u64
+        } else {
+            self.inner_max_degree as u64
+        };
+        let degree = children.length();
+
+        // See if the node is big enough, or we need to merge it with another node on this level.
+        let big_enough = degree * 2 >= max_degree;
+
+        let new_max_key = *children.new_end_iter().iter_prev(children).iter_borrow_key(children);
+
+        // See if max key was updated for the current node, and if so - update it on the path.
+        let max_key_updated = cmp::compare(&new_max_key, key).is_lt();
+        if (max_key_updated) {
+            assert!(degree >= 1, error::invalid_state(EINTERNAL_INVARIANT_BROKEN));
+
+            self.update_key(path_to_node, key, new_max_key);
+        };
+
+        // If node is big enough after removal, we are done.
+        if (big_enough) {
+            return old_child;
+        };
+
+        old_child
+    };
+
+    // Children size is below threshold, we need to rebalance with a neighbor on the same level.
+
+    // In order to work on multiple nodes at the same time, we cannot borrow_mut, and instead need to
+    // remove_and_reserve the existing node.
+    let (node_slot, node) = self.nodes.remove_and_reserve(node_index);
+
+    let is_leaf = node.is_leaf;
+    let max_degree = self.get_max_degree(is_leaf);
+    let prev = node.prev;
+    let next = node.next;
+
+    // index of the node we will rebalance with.
+    let sibling_index = {
+        let parent_children = &self.borrow_node(*path_to_node.borrow(path_to_node.length() - 1)).children;
+        assert!(parent_children.length() >= 2, error::invalid_state(EINTERNAL_INVARIANT_BROKEN));
+        // If we are the largest node from the parent, we merge with the `prev`
+        // (which is then guaranteed to have the same parent, as any node has >1 children),
+        // otherwise we merge with `next`.
+        if (parent_children.new_end_iter().iter_prev(parent_children).iter_borrow(parent_children).node_index.stored_to_index() == node_index) {
+            prev
+        } else {
+            next
+        }
+    };
+
+    let children = &mut node.children;
+
+    let (sibling_slot, sibling_node) = self.nodes.remove_and_reserve(sibling_index);
+    assert!(is_leaf == sibling_node.is_leaf, error::invalid_state(EINTERNAL_INVARIANT_BROKEN));
+    let sibling_children = &mut sibling_node.children;
+
+    if ((sibling_children.length() - 1) * 2 >= max_degree) {
+        // The sibling node has enough elements, we can just borrow an element from the sibling node.
+        if (sibling_index == next) {
+            // if sibling is the node with larger keys, we remove a child from the start
+            let old_max_key = *children.new_end_iter().iter_prev(children).iter_borrow_key(children);
+            let sibling_begin_iter = sibling_children.new_begin_iter();
+            let borrowed_max_key = *sibling_begin_iter.iter_borrow_key(sibling_children);
+            let borrowed_element = sibling_begin_iter.iter_remove(sibling_children);
+
+            children.new_end_iter().iter_add(children, borrowed_max_key, borrowed_element);
+
+            // max_key of the current node changed, so update
+            self.update_key(path_to_node, &old_max_key, borrowed_max_key);
+        } else {
+            // if sibling is the node with smaller keys, we remove a child from the end
+            let sibling_end_iter = sibling_children.new_end_iter().iter_prev(sibling_children);
+            let borrowed_max_key = *sibling_end_iter.iter_borrow_key(sibling_children);
+            let borrowed_element = sibling_end_iter.iter_remove(sibling_children);
+
+            children.add(borrowed_max_key, borrowed_element);
+
+            // max_key of the sibling node changed, so update
+            self.update_key(path_to_node, &borrowed_max_key, *sibling_children.new_end_iter().iter_prev(sibling_children).iter_borrow_key(sibling_children));
+        };
+
+        self.nodes.fill_reserved_slot(node_slot, node);
+        self.nodes.fill_reserved_slot(sibling_slot, sibling_node);
+        return old_child;
+    };
+
+    // The sibling node doesn't have enough elements to borrow, merge with the sibling node.
+    // Keep the slot of the node with larger keys of the two, to not require updating key on the parent nodes.
+    // But append to the node with smaller keys, as ordered_map::append is more efficient when adding to the end.
+    let (key_to_remove, reserved_slot_to_remove) = if (sibling_index == next) {
+        // destroying larger sibling node, keeping sibling_slot.
+        let Node::V1 { children: sibling_children, is_leaf: _, prev: _, next: sibling_next } = sibling_node;
+        let key_to_remove = *children.new_end_iter().iter_prev(children).iter_borrow_key(children);
+        children.append_disjoint(sibling_children);
+        node.next = sibling_next;
+
+        if (node.next != NULL_INDEX) {
+            assert!(self.nodes.borrow_mut(node.next).prev == sibling_index, error::invalid_state(EINTERNAL_INVARIANT_BROKEN));
+        };
+
+        // we are removing node_index, which the previous node's next was pointing to,
+        // so update the pointer
+        if (node.prev != NULL_INDEX) {
+            self.nodes.borrow_mut(node.prev).next = sibling_index;
+        };
+        // Otherwise, we were the smallest node on the level; if this is the leaf level, update the pointer.
+        if (self.min_leaf_index == node_index) {
+            assert!(is_leaf, error::invalid_state(EINTERNAL_INVARIANT_BROKEN));
+            self.min_leaf_index = sibling_index;
+        };
+
+        self.nodes.fill_reserved_slot(sibling_slot, node);
+
+        (key_to_remove, node_slot)
+    } else {
+        // destroying larger current node, keeping node_slot
+        let Node::V1 { children: node_children, is_leaf: _, prev: _, next: node_next } = node;
+        let key_to_remove = *sibling_children.new_end_iter().iter_prev(sibling_children).iter_borrow_key(sibling_children);
+        sibling_children.append_disjoint(node_children);
+        sibling_node.next = node_next;
+
+        if (sibling_node.next != NULL_INDEX) {
+            assert!(self.nodes.borrow_mut(sibling_node.next).prev == node_index, error::invalid_state(EINTERNAL_INVARIANT_BROKEN));
+        };
+        // we are removing the sibling's node index, which the previous node's next was pointing to,
+        // so update the pointer
+        if (sibling_node.prev != NULL_INDEX) {
+            self.nodes.borrow_mut(sibling_node.prev).next = node_index;
+        };
+        // Otherwise, the sibling was the smallest node on the level; if this is the leaf level, update the pointer.
+        if (self.min_leaf_index == sibling_index) {
+            assert!(is_leaf, error::invalid_state(EINTERNAL_INVARIANT_BROKEN));
+            self.min_leaf_index = node_index;
+        };
+
+        self.nodes.fill_reserved_slot(node_slot, sibling_node);
+
+        (key_to_remove, sibling_slot)
+    };
+
+    assert!(!path_to_node.is_empty(), error::invalid_state(EINTERNAL_INVARIANT_BROKEN));
+    let slot_to_remove = self.remove_at(path_to_node, &key_to_remove).destroy_inner_child();
+    self.nodes.free_reserved_slot(reserved_slot_to_remove, slot_to_remove);
+
+    old_child
+}
+
+ + + +
+ + + +## Specification + + + +
pragma verify = false;
+
+ + + + + +### Function `add_at` + + +
fun add_at<K: copy, drop, store, V: store>(self: &mut big_ordered_map::BigOrderedMap<K, V>, path_to_node: vector<u64>, key: K, child: big_ordered_map::Child<V>, allow_overwrite: bool): option::Option<big_ordered_map::Child<V>>
+
+ + + + +
pragma opaque;
+
+ + + + + +### Function `remove_at` + + +
fun remove_at<K: copy, drop, store, V: store>(self: &mut big_ordered_map::BigOrderedMap<K, V>, path_to_node: vector<u64>, key: &K): big_ordered_map::Child<V>
+
+ + + + +
pragma opaque;
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/aptos-framework/doc/block.md b/aptos-move/framework/aptos-framework/doc/block.md index f5c4430007732..ebb17f5d99997 100644 --- a/aptos-move/framework/aptos-framework/doc/block.md +++ b/aptos-move/framework/aptos-framework/doc/block.md @@ -54,7 +54,6 @@ This module defines a struct storing the metadata of the block and new block eve use 0x1::system_addresses; use 0x1::table_with_length; use 0x1::timestamp; -use 0x1::transaction_fee;
@@ -513,11 +512,12 @@ Can only be called as part of the Aptos governance proposal process established event::emit( UpdateEpochInterval { old_epoch_interval, new_epoch_interval }, ); + } else { + event::emit_event<UpdateEpochIntervalEvent>( + &mut block_resource.update_epoch_interval_events, + UpdateEpochIntervalEvent { old_epoch_interval, new_epoch_interval }, + ); }; - event::emit_event<UpdateEpochIntervalEvent>( - &mut block_resource.update_epoch_interval_events, - UpdateEpochIntervalEvent { old_epoch_interval, new_epoch_interval }, - ); }
@@ -593,7 +593,6 @@ Return epoch interval in seconds. let block_metadata_ref = borrow_global_mut<BlockResource>(@aptos_framework); block_metadata_ref.height = event::counter(&block_metadata_ref.new_block_events); - // Emit both event v1 and v2 for compatibility. Eventually only module events will be kept. let new_block_event = NewBlockEvent { hash, epoch, @@ -604,26 +603,7 @@ Return epoch interval in seconds. failed_proposer_indices, time_microseconds: timestamp, }; - let new_block_event_v2 = NewBlock { - hash, - epoch, - round, - height: block_metadata_ref.height, - previous_block_votes_bitvec, - proposer, - failed_proposer_indices, - time_microseconds: timestamp, - }; - emit_new_block_event(vm, &mut block_metadata_ref.new_block_events, new_block_event, new_block_event_v2); - - if (features::collect_and_distribute_gas_fees()) { - // Assign the fees collected from the previous block to the previous block proposer. - // If for any reason the fees cannot be assigned, this function burns the collected coins. - transaction_fee::process_collected_fees(); - // Set the proposer of this block as the receiver of the fees, so that the fees for this - // block are assigned to the right account. - transaction_fee::register_proposer_for_fee_collection(proposer); - }; + emit_new_block_event(vm, &mut block_metadata_ref.new_block_events, new_block_event); // Performance scores have to be updated before the epoch transition as the transaction that triggers the // transition is the last block in the previous epoch. @@ -759,7 +739,7 @@ Get the current block height Emit the event and update height and global timestamp -
fun emit_new_block_event(vm: &signer, event_handle: &mut event::EventHandle<block::NewBlockEvent>, new_block_event: block::NewBlockEvent, new_block_event_v2: block::NewBlock)
+
fun emit_new_block_event(vm: &signer, event_handle: &mut event::EventHandle<block::NewBlockEvent>, new_block_event: block::NewBlockEvent)
 
@@ -772,7 +752,6 @@ Emit the event and update height and global timestamp vm: &signer, event_handle: &mut EventHandle<NewBlockEvent>, new_block_event: NewBlockEvent, - new_block_event_v2: NewBlock ) acquires CommitHistory { if (exists<CommitHistory>(@aptos_framework)) { let commit_history_ref = borrow_global_mut<CommitHistory>(@aptos_framework); @@ -791,9 +770,6 @@ Emit the event and update height and global timestamp event::counter(event_handle) == new_block_event.height, error::invalid_argument(ENUM_NEW_BLOCK_EVENTS_DOES_NOT_MATCH_BLOCK_HEIGHT), ); - if (std::features::module_event_migration_enabled()) { - event::emit(new_block_event_v2); - }; event::emit_event<NewBlockEvent>(event_handle, new_block_event); }
@@ -835,16 +811,6 @@ reconfiguration event. failed_proposer_indices: vector::empty(), time_microseconds: 0, }, - NewBlock { - hash: genesis_id, - epoch: 0, - round: 0, - height: 0, - previous_block_votes_bitvec: vector::empty(), - proposer: @vm_reserved, - failed_proposer_indices: vector::empty(), - time_microseconds: 0, - } ); }
@@ -888,16 +854,6 @@ new block event for WriteSetPayload. failed_proposer_indices: vector::empty(), time_microseconds: timestamp::now_microseconds(), }, - NewBlock { - hash: fake_block_hash, - epoch: reconfiguration::current_epoch(), - round: MAX_U64, - height: block_metadata_ref.height, - previous_block_votes_bitvec: vector::empty(), - proposer: @vm_reserved, - failed_proposer_indices: vector::empty(), - time_microseconds: timestamp::now_microseconds(), - } ); }
@@ -972,7 +928,8 @@ new block event for WriteSetPayload. ### Module-level Specification -
invariant [suspendable] chain_status::is_operating() ==> exists<BlockResource>(@aptos_framework);
+
pragma verify = false;
+invariant [suspendable] chain_status::is_operating() ==> exists<BlockResource>(@aptos_framework);
 invariant [suspendable] chain_status::is_operating() ==> exists<CommitHistory>(@aptos_framework);
 
@@ -1088,6 +1045,68 @@ The number of new events created does not exceed MAX_U64. + + + + +
schema BlockRequirement {
+    vm: signer;
+    hash: address;
+    epoch: u64;
+    round: u64;
+    proposer: address;
+    failed_proposer_indices: vector<u64>;
+    previous_block_votes_bitvec: vector<u8>;
+    timestamp: u64;
+    requires chain_status::is_operating();
+    requires system_addresses::is_vm(vm);
+    // This enforces high-level requirement 4:
+    requires proposer == @vm_reserved || stake::spec_is_current_epoch_validator(proposer);
+    requires (proposer == @vm_reserved) ==> (timestamp::spec_now_microseconds() == timestamp);
+    requires (proposer != @vm_reserved) ==> (timestamp::spec_now_microseconds() < timestamp);
+    requires exists<CoinInfo<AptosCoin>>(@aptos_framework);
+    include staking_config::StakingRewardsConfigRequirement;
+}
+
+ + + + + + + +
schema Initialize {
+    aptos_framework: signer;
+    epoch_interval_microsecs: u64;
+    let addr = signer::address_of(aptos_framework);
+    // This enforces high-level requirement 2:
+    aborts_if addr != @aptos_framework;
+    aborts_if epoch_interval_microsecs == 0;
+    aborts_if exists<BlockResource>(addr);
+    aborts_if exists<CommitHistory>(addr);
+    ensures exists<BlockResource>(addr);
+    ensures exists<CommitHistory>(addr);
+    ensures global<BlockResource>(addr).height == 0;
+}
+
+ + + + + + + +
schema NewEventHandle {
+    aptos_framework: signer;
+    let addr = signer::address_of(aptos_framework);
+    let account = global<account::Account>(addr);
+    aborts_if !exists<account::Account>(addr);
+    aborts_if account.guid_creation_num + 2 > MAX_U64;
+}
+
+ + + ### Function `update_epoch_interval_microsecs` @@ -1224,7 +1243,7 @@ The BlockResource existed under the @aptos_framework. ### Function `emit_new_block_event` -
fun emit_new_block_event(vm: &signer, event_handle: &mut event::EventHandle<block::NewBlockEvent>, new_block_event: block::NewBlockEvent, new_block_event_v2: block::NewBlock)
+
fun emit_new_block_event(vm: &signer, event_handle: &mut event::EventHandle<block::NewBlockEvent>, new_block_event: block::NewBlockEvent)
 
diff --git a/aptos-move/framework/aptos-framework/doc/chain_status.md b/aptos-move/framework/aptos-framework/doc/chain_status.md index d7b9924a2be1b..4bbd28af10ccc 100644 --- a/aptos-move/framework/aptos-framework/doc/chain_status.md +++ b/aptos-move/framework/aptos-framework/doc/chain_status.md @@ -205,7 +205,7 @@ Helper function to assert genesis state.
public fun assert_genesis() {
-    assert!(is_genesis(), error::invalid_state(ENOT_GENESIS));
+    assert!(is_genesis(), error::invalid_state(ENOT_OPERATING));
 }
 
diff --git a/aptos-move/framework/aptos-framework/doc/code.md b/aptos-move/framework/aptos-framework/doc/code.md index 9eced3743699d..fe773e27c0f99 100644 --- a/aptos-move/framework/aptos-framework/doc/code.md +++ b/aptos-move/framework/aptos-framework/doc/code.md @@ -12,8 +12,11 @@ This module supports functionality related to code management. - [Struct `ModuleMetadata`](#0x1_code_ModuleMetadata) - [Struct `UpgradePolicy`](#0x1_code_UpgradePolicy) - [Struct `PublishPackage`](#0x1_code_PublishPackage) +- [Struct `CodePublishingPermission`](#0x1_code_CodePublishingPermission) - [Struct `AllowedDep`](#0x1_code_AllowedDep) - [Constants](#@Constants_0) +- [Function `check_code_publishing_permission`](#0x1_code_check_code_publishing_permission) +- [Function `grant_permission`](#0x1_code_grant_permission) - [Function `upgrade_policy_arbitrary`](#0x1_code_upgrade_policy_arbitrary) - [Function `upgrade_policy_compat`](#0x1_code_upgrade_policy_compat) - [Function `upgrade_policy_immutable`](#0x1_code_upgrade_policy_immutable) @@ -50,6 +53,7 @@ This module supports functionality related to code management. use 0x1::features; use 0x1::object; use 0x1::option; +use 0x1::permissioned_signer; use 0x1::signer; use 0x1::string; use 0x1::system_addresses; @@ -94,7 +98,7 @@ The package registry at the given address. Metadata for a package. All byte blobs are represented as base64-of-gzipped-bytes -
struct PackageMetadata has drop, store
+
struct PackageMetadata has copy, drop, store
 
@@ -200,7 +204,7 @@ A dependency to a package published at address Metadata about a module in a package. -
struct ModuleMetadata has drop, store
+
struct ModuleMetadata has copy, drop, store
 
@@ -300,6 +304,33 @@ Event emitted when code is published to an address. + + + + +## Struct `CodePublishingPermission` + + + +
struct CodePublishingPermission has copy, drop, store
+
+ + + +
+Fields + + +
+
+dummy_field: bool +
+
+ +
+
+ +
@@ -413,6 +444,16 @@ Not the owner of the package registry. + + +Current permissioned signer cannot publish codes. + + +
const ENO_CODE_PERMISSION: u64 = 11;
+
+ + + Dependency could not be resolved to any published package. @@ -443,6 +484,59 @@ Cannot downgrade a package's upgradability policy + + +## Function `check_code_publishing_permission` + +Permissions + + +
public(friend) fun check_code_publishing_permission(s: &signer)
+
+ + + +
+Implementation + + +
public(friend) fun check_code_publishing_permission(s: &signer) {
+    assert!(
+        permissioned_signer::check_permission_exists(s, CodePublishingPermission {}),
+        error::permission_denied(ENO_CODE_PERMISSION),
+    );
+}
+
+ + + +
+ + + +## Function `grant_permission` + +Grant permission to publish code on behalf of the master signer. + + +
public fun grant_permission(master: &signer, permissioned_signer: &signer)
+
+ + + +
+Implementation + + +
public fun grant_permission(master: &signer, permissioned_signer: &signer) {
+    permissioned_signer::authorize_unlimited(master, permissioned_signer, CodePublishingPermission {})
+}
+
+ + + +
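+A hedged usage sketch: `master` is the account's signer and `perm_signer` is a permissioned
+signer previously derived from it (how it is obtained is out of scope here); after the call,
+code-publishing entry points invoked with `perm_signer` pass check_code_publishing_permission
+instead of aborting with ENO_CODE_PERMISSION.
+
+code::grant_permission(master, perm_signer);
+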
+ ## Function `upgrade_policy_arbitrary` @@ -598,6 +692,7 @@ package.
public fun publish_package(owner: &signer, pack: PackageMetadata, code: vector<vector<u8>>) acquires PackageRegistry {
+    check_code_publishing_permission(owner);
     // Disallow incompatible upgrade mode. Governance can decide later if this should be reconsidered.
     assert!(
         pack.upgrade_policy.policy > upgrade_policy_arbitrary().policy,
@@ -612,9 +707,9 @@ package.
     // Checks for valid dependencies to other packages
     let allowed_deps = check_dependencies(addr, &pack);
 
-    // Check package against conflicts
+    // Check package against conflicts
     // To avoid prover compiler error on spec
-    // the package need to be an immutable variable
+    // the package need to be an immutable variable
     let module_names = get_module_names(&pack);
     let package_immutable = &borrow_global<PackageRegistry>(addr).packages;
     let len = vector::length(package_immutable);
@@ -679,6 +774,7 @@ package.
 
 
 
public fun freeze_code_object(publisher: &signer, code_object: Object<PackageRegistry>) acquires PackageRegistry {
+    check_code_publishing_permission(publisher);
     let code_object_addr = object::object_address(&code_object);
     assert!(exists<PackageRegistry>(code_object_addr), error::not_found(ECODE_OBJECT_DOES_NOT_EXIST));
     assert!(
@@ -687,9 +783,17 @@ package.
     );
 
     let registry = borrow_global_mut<PackageRegistry>(code_object_addr);
-    vector::for_each_mut<PackageMetadata>(&mut registry.packages, |pack| {
-        let package: &mut PackageMetadata = pack;
-        package.upgrade_policy = upgrade_policy_immutable();
+    vector::for_each_mut(&mut registry.packages, |pack| {
+        let package: &mut PackageMetadata = pack;
+        package.upgrade_policy = upgrade_policy_immutable();
+    });
+
+    // We unfortunately have to make a copy of each package to avoid borrow checker issues as check_dependencies
+    // needs to borrow PackageRegistry from the dependency packages.
+    // This would increase the amount of gas used, but this is a rare operation and it's rare to have many packages
+    // in a single code object.
+    vector::for_each(registry.packages, |pack| {
+        check_dependencies(code_object_addr, &pack);
     });
 }
 
@@ -779,7 +883,7 @@ Checks whether a new package with given names can co-exist with old package.
fun check_coexistence(old_pack: &PackageMetadata, new_modules: &vector<String>) {
-    // The modules introduced by each package must not overlap with `names`.
+    // The modules introduced by each package must not overlap with `names`.
     vector::for_each_ref(&old_pack.modules, |old_mod| {
         let old_mod: &ModuleMetadata = old_mod;
         let j = 0;
@@ -1065,7 +1169,7 @@ Native function to initiate module loading, including a list of allowed dependen
 
 
 
pragma verify = true;
-pragma aborts_if_is_strict;
+pragma aborts_if_is_partial;
 
@@ -1245,4 +1349,17 @@ Native function to initiate module loading, including a list of allowed dependen
+ + + + + +
schema AbortsIfPermissionedSigner {
+    s: signer;
+    let perm = CodePublishingPermission {};
+    aborts_if !permissioned_signer::spec_check_permission_exists(s, perm);
+}
+
+ + [move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/aptos-framework/doc/coin.md b/aptos-move/framework/aptos-framework/doc/coin.md index 75965c9d1b5fa..c56a72944882c 100644 --- a/aptos-move/framework/aptos-framework/doc/coin.md +++ b/aptos-move/framework/aptos-framework/doc/coin.md @@ -18,8 +18,8 @@ This module provides the foundation for typesafe Coins. - [Struct `DepositEvent`](#0x1_coin_DepositEvent) - [Struct `WithdrawEvent`](#0x1_coin_WithdrawEvent) - [Struct `CoinEventHandleDeletion`](#0x1_coin_CoinEventHandleDeletion) +- [Struct `CoinStoreDeletion`](#0x1_coin_CoinStoreDeletion) - [Struct `PairCreation`](#0x1_coin_PairCreation) -- [Resource `MigrationFlag`](#0x1_coin_MigrationFlag) - [Struct `MintCapability`](#0x1_coin_MintCapability) - [Struct `FreezeCapability`](#0x1_coin_FreezeCapability) - [Struct `BurnCapability`](#0x1_coin_BurnCapability) @@ -29,6 +29,7 @@ This module provides the foundation for typesafe Coins. - [Struct `MintRefReceipt`](#0x1_coin_MintRefReceipt) - [Struct `TransferRefReceipt`](#0x1_coin_TransferRefReceipt) - [Struct `BurnRefReceipt`](#0x1_coin_BurnRefReceipt) +- [Resource `MigrationFlag`](#0x1_coin_MigrationFlag) - [Resource `Ghost$supply`](#0x1_coin_Ghost$supply) - [Resource `Ghost$aggregate_supply`](#0x1_coin_Ghost$aggregate_supply) - [Constants](#@Constants_0) @@ -53,16 +54,12 @@ This module provides the foundation for typesafe Coins. - [Function `convert_and_take_paired_burn_ref`](#0x1_coin_convert_and_take_paired_burn_ref) - [Function `return_paired_burn_ref`](#0x1_coin_return_paired_burn_ref) - [Function `borrow_paired_burn_ref`](#0x1_coin_borrow_paired_burn_ref) -- [Function `initialize_supply_config`](#0x1_coin_initialize_supply_config) - [Function `allow_supply_upgrades`](#0x1_coin_allow_supply_upgrades) -- [Function `initialize_aggregatable_coin`](#0x1_coin_initialize_aggregatable_coin) -- [Function `is_aggregatable_coin_zero`](#0x1_coin_is_aggregatable_coin_zero) -- [Function `drain_aggregatable_coin`](#0x1_coin_drain_aggregatable_coin) -- [Function `merge_aggregatable_coin`](#0x1_coin_merge_aggregatable_coin) -- [Function `collect_into_aggregatable_coin`](#0x1_coin_collect_into_aggregatable_coin) - [Function `calculate_amount_to_withdraw`](#0x1_coin_calculate_amount_to_withdraw) - [Function `maybe_convert_to_fungible_store`](#0x1_coin_maybe_convert_to_fungible_store) +- [Function `assert_signer_has_permission`](#0x1_coin_assert_signer_has_permission) - [Function `migrate_to_fungible_store`](#0x1_coin_migrate_to_fungible_store) +- [Function `migrate_coin_store_to_fungible_store`](#0x1_coin_migrate_coin_store_to_fungible_store) - [Function `coin_address`](#0x1_coin_coin_address) - [Function `balance`](#0x1_coin_balance) - [Function `is_balance_at_least`](#0x1_coin_is_balance_at_least) @@ -77,9 +74,11 @@ This module provides the foundation for typesafe Coins. 
- [Function `coin_supply`](#0x1_coin_coin_supply) - [Function `burn`](#0x1_coin_burn) - [Function `burn_from`](#0x1_coin_burn_from) +- [Function `burn_from_for_gas`](#0x1_coin_burn_from_for_gas) - [Function `deposit`](#0x1_coin_deposit) -- [Function `migrated_primary_fungible_store_exists`](#0x1_coin_migrated_primary_fungible_store_exists) -- [Function `force_deposit`](#0x1_coin_force_deposit) +- [Function `deposit_with_signer`](#0x1_coin_deposit_with_signer) +- [Function `can_receive_paired_fungible_asset`](#0x1_coin_can_receive_paired_fungible_asset) +- [Function `deposit_for_gas_fee`](#0x1_coin_deposit_for_gas_fee) - [Function `destroy_zero`](#0x1_coin_destroy_zero) - [Function `extract`](#0x1_coin_extract) - [Function `extract_all`](#0x1_coin_extract_all) @@ -94,7 +93,6 @@ This module provides the foundation for typesafe Coins. - [Function `register`](#0x1_coin_register) - [Function `transfer`](#0x1_coin_transfer) - [Function `value`](#0x1_coin_value) -- [Function `withdraw_from`](#0x1_coin_withdraw_from) - [Function `withdraw`](#0x1_coin_withdraw) - [Function `zero`](#0x1_coin_zero) - [Function `destroy_freeze_cap`](#0x1_coin_destroy_freeze_cap) @@ -108,13 +106,7 @@ This module provides the foundation for typesafe Coins. - [Struct `AggregatableCoin`](#@Specification_1_AggregatableCoin) - [Function `coin_to_fungible_asset`](#@Specification_1_coin_to_fungible_asset) - [Function `fungible_asset_to_coin`](#@Specification_1_fungible_asset_to_coin) - - [Function `initialize_supply_config`](#@Specification_1_initialize_supply_config) - [Function `allow_supply_upgrades`](#@Specification_1_allow_supply_upgrades) - - [Function `initialize_aggregatable_coin`](#@Specification_1_initialize_aggregatable_coin) - - [Function `is_aggregatable_coin_zero`](#@Specification_1_is_aggregatable_coin_zero) - - [Function `drain_aggregatable_coin`](#@Specification_1_drain_aggregatable_coin) - - [Function `merge_aggregatable_coin`](#@Specification_1_merge_aggregatable_coin) - - [Function `collect_into_aggregatable_coin`](#@Specification_1_collect_into_aggregatable_coin) - [Function `maybe_convert_to_fungible_store`](#@Specification_1_maybe_convert_to_fungible_store) - [Function `coin_address`](#@Specification_1_coin_address) - [Function `balance`](#@Specification_1_balance) @@ -128,7 +120,7 @@ This module provides the foundation for typesafe Coins. - [Function `burn`](#@Specification_1_burn) - [Function `burn_from`](#@Specification_1_burn_from) - [Function `deposit`](#@Specification_1_deposit) - - [Function `force_deposit`](#@Specification_1_force_deposit) + - [Function `deposit_for_gas_fee`](#@Specification_1_deposit_for_gas_fee) - [Function `destroy_zero`](#@Specification_1_destroy_zero) - [Function `extract`](#@Specification_1_extract) - [Function `extract_all`](#@Specification_1_extract_all) @@ -149,7 +141,6 @@ This module provides the foundation for typesafe Coins.
use 0x1::account;
 use 0x1::aggregator;
-use 0x1::aggregator_factory;
 use 0x1::create_signer;
 use 0x1::error;
 use 0x1::event;
@@ -159,12 +150,14 @@ This module provides the foundation for typesafe Coins.
 use 0x1::object;
 use 0x1::option;
 use 0x1::optional_aggregator;
+use 0x1::permissioned_signer;
 use 0x1::primary_fungible_store;
 use 0x1::signer;
 use 0x1::string;
 use 0x1::system_addresses;
 use 0x1::table;
 use 0x1::type_info;
+use 0x1::vector;
 
@@ -202,12 +195,11 @@ Main structure representing a coin/token in an account's custody.

## Struct `AggregatableCoin`

-Represents a coin with aggregator as its value. This allows to update
-the coin in every transaction avoiding read-modify-write conflicts. Only
-used for gas fees distribution by Aptos Framework (0x1).
+DEPRECATED

-
struct AggregatableCoin<CoinType> has store
+
#[deprecated]
+struct AggregatableCoin<CoinType> has store
 
@@ -283,7 +275,8 @@ Configuration that controls the behavior of total coin supply. If the field is set, coin creators are allowed to upgrade to parallelizable implementations. -
struct SupplyConfig has key
+
#[deprecated]
+struct SupplyConfig has key
 
@@ -567,8 +560,11 @@ Event emitted when some amount of a coin is withdrawn from an account. Module event emitted when the event handles related to coin store is deleted. +Deprecated: replaced with CoinStoreDeletion +
#[event]
+#[deprecated]
 struct CoinEventHandleDeletion has drop, store
 
@@ -602,15 +598,15 @@ Module event emitted when the event handles related to coin store is deleted.

-
+
-## Struct `PairCreation`
+## Struct `CoinStoreDeletion`

-Module event emitted when a new pair of coin and fungible asset is created.
+Module event emitted when the event handles related to coin store is deleted.
#[event]
-struct PairCreation has drop, store
+struct CoinStoreDeletion has drop, store
 
@@ -621,13 +617,25 @@ Module event emitted when a new pair of coin and fungible asset is created.
-coin_type: type_info::TypeInfo +coin_type: string::String
-fungible_asset_metadata_address: address +event_handle_creation_address: address +
+
+ +
+
+deleted_deposit_event_handle_creation_number: u64 +
+
+ +
+
+deleted_withdraw_event_handle_creation_number: u64
@@ -637,15 +645,15 @@ Module event emitted when a new pair of coin and fungible asset is created. - + -## Resource `MigrationFlag` +## Struct `PairCreation` -The flag the existence of which indicates the primary fungible store is created by the migration from CoinStore. +Module event emitted when a new pair of coin and fungible asset is created. -
#[resource_group_member(#[group = 0x1::object::ObjectGroup])]
-struct MigrationFlag has key
+
#[event]
+struct PairCreation has drop, store
 
@@ -656,7 +664,13 @@ The flag the existence of which indicates the primary fungible store is created
-dummy_field: bool +coin_type: type_info::TypeInfo +
+
+ +
+
+fungible_asset_metadata_address: address
@@ -930,6 +944,36 @@ The hot potato receipt for flash borrowing BurnRef.
+ + + + +## Resource `MigrationFlag` + +The flag the existence of which indicates the primary fungible store is created by the migration from CoinStore. + + +
#[resource_group_member(#[group = 0x1::object::ObjectGroup])]
+#[deprecated]
+struct MigrationFlag has key
+
+ + + +
+Fields + + +
+
+dummy_field: bool +
+
+ +
+
+ +
@@ -1001,22 +1045,21 @@ Maximum possible aggregatable coin value. - + -Maximum possible coin supply. +Not enough coins to complete transaction -
const MAX_U128: u128 = 340282366920938463463374607431768211455;
+
const EINSUFFICIENT_BALANCE: u64 = 6;
 
- + -Not enough coins to complete transaction -
const EINSUFFICIENT_BALANCE: u64 = 6;
+
const MAX_DECIMALS: u8 = 32;
 
@@ -1071,6 +1114,16 @@ The coin converison map is not created yet. + + +The decimals of the coin is too large. + + +
const ECOIN_DECIMALS_TOO_LARGE: u64 = 29;
+
+ + + Address of account which is used to initialize a coin CoinType doesn't match the deployer of module @@ -1284,7 +1337,7 @@ The TransferRefReceipt does not match the TransferRef to be returned. -
const MAX_COIN_SYMBOL_LENGTH: u64 = 10;
+
const MAX_COIN_SYMBOL_LENGTH: u64 = 32;
 
@@ -1441,7 +1494,7 @@ Create APT pairing by passing AptosCoin. }; primary_fungible_store::create_primary_store_enabled_fungible_asset( &metadata_object_cref, - option::map(coin_supply<CoinType>(), |_| MAX_U128), + option::none(), name<CoinType>(), symbol<CoinType>(), decimals<CoinType>(), @@ -1964,7 +2017,7 @@ Return the BurnRef with the hot potato receipt. let metadata = assert_paired_metadata_exists<CoinType>(); let metadata_addr = object_address(&metadata); assert!(exists<PairedFungibleAssetRefs>(metadata_addr), error::internal(EPAIRED_FUNGIBLE_ASSET_REFS_NOT_FOUND)); - let burn_ref_opt = &mut borrow_global_mut<PairedFungibleAssetRefs>(metadata_addr).burn_ref_opt; + let burn_ref_opt = &borrow_global<PairedFungibleAssetRefs>(metadata_addr).burn_ref_opt; assert!(option::is_some(burn_ref_opt), error::not_found(EBURN_REF_NOT_FOUND)); option::borrow(burn_ref_opt) } @@ -1974,14 +2027,15 @@ Return the BurnRef with the hot potato receipt. - + -## Function `initialize_supply_config` +## Function `allow_supply_upgrades` -Publishes supply configuration. Initially, upgrading is not allowed. +This should be called by on-chain governance to update the config and allow +or disallow upgradability of total supply. -
public(friend) fun initialize_supply_config(aptos_framework: &signer)
+
public fun allow_supply_upgrades(_aptos_framework: &signer, _allowed: bool)
 
@@ -1990,9 +2044,8 @@ Publishes supply configuration. Initially, upgrading is not allowed. Implementation -
public(friend) fun initialize_supply_config(aptos_framework: &signer) {
-    system_addresses::assert_aptos_framework(aptos_framework);
-    move_to(aptos_framework, SupplyConfig { allow_upgrades: false });
+
public fun allow_supply_upgrades(_aptos_framework: &signer, _allowed: bool) {
+    abort error::invalid_state(ECOIN_SUPPLY_UPGRADE_NOT_SUPPORTED)
 }
 
@@ -2000,15 +2053,13 @@ Publishes supply configuration. Initially, upgrading is not allowed. - + -## Function `allow_supply_upgrades` +## Function `calculate_amount_to_withdraw` -This should be called by on-chain governance to update the config and allow -or disallow upgradability of total supply. -
public fun allow_supply_upgrades(aptos_framework: &signer, allowed: bool)
+
fun calculate_amount_to_withdraw<CoinType>(account_addr: address, amount: u64): (u64, u64)
 
@@ -2017,10 +2068,23 @@ or disallow upgradability of total supply. Implementation -
public fun allow_supply_upgrades(aptos_framework: &signer, allowed: bool) acquires SupplyConfig {
-    system_addresses::assert_aptos_framework(aptos_framework);
-    let allow_upgrades = &mut borrow_global_mut<SupplyConfig>(@aptos_framework).allow_upgrades;
-    *allow_upgrades = allowed;
+
inline fun calculate_amount_to_withdraw<CoinType>(
+    account_addr: address,
+    amount: u64
+): (u64, u64) {
+    let coin_balance = coin_balance<CoinType>(account_addr);
+    if (coin_balance >= amount) {
+        (amount, 0)
+    } else {
+        let metadata = paired_metadata<CoinType>();
+        if (option::is_some(&metadata) && primary_fungible_store::primary_store_exists(
+            account_addr,
+            option::destroy_some(metadata)
+        ))
+            (coin_balance, amount - coin_balance)
+        else
+            abort error::invalid_argument(EINSUFFICIENT_BALANCE)
+    }
 }
 
@@ -2028,15 +2092,13 @@ or disallow upgradability of total supply. - + -## Function `initialize_aggregatable_coin` +## Function `maybe_convert_to_fungible_store` -Creates a new aggregatable coin with value overflowing on limit. Note that this function can -only be called by Aptos Framework (0x1) account for now because of create_aggregator. -
public(friend) fun initialize_aggregatable_coin<CoinType>(aptos_framework: &signer): coin::AggregatableCoin<CoinType>
+
fun maybe_convert_to_fungible_store<CoinType>(account: address)
 
@@ -2045,11 +2107,49 @@ only be called by Aptos Framework (0x1) account for now because of create_ Implementation -
public(friend) fun initialize_aggregatable_coin<CoinType>(aptos_framework: &signer): AggregatableCoin<CoinType> {
-    let aggregator = aggregator_factory::create_aggregator(aptos_framework, MAX_U64);
-    AggregatableCoin<CoinType> {
-        value: aggregator,
-    }
+
fun maybe_convert_to_fungible_store<CoinType>(account: address) acquires CoinStore, CoinConversionMap, CoinInfo {
+    if (!features::coin_to_fungible_asset_migration_feature_enabled()) {
+        abort error::unavailable(ECOIN_TO_FUNGIBLE_ASSET_FEATURE_NOT_ENABLED)
+    };
+    if (exists<CoinStore<CoinType>>(account)) {
+        let CoinStore<CoinType> { coin, frozen, deposit_events, withdraw_events } =
+            move_from<CoinStore<CoinType>>(account);
+        if (is_coin_initialized<CoinType>()) {
+            let metadata = ensure_paired_metadata<CoinType>();
+            let store = primary_fungible_store::ensure_primary_store_exists(account, metadata);
+
+            event::emit(CoinStoreDeletion {
+                coin_type: type_info::type_name<CoinType>(),
+                event_handle_creation_address: guid::creator_address(
+                    event::guid(&deposit_events)
+                ),
+                deleted_deposit_event_handle_creation_number: guid::creation_num(event::guid(&deposit_events)),
+                deleted_withdraw_event_handle_creation_number: guid::creation_num(event::guid(&withdraw_events))
+            });
+
+            if (coin.value == 0) {
+                destroy_zero(coin);
+            } else {
+                fungible_asset::unchecked_deposit_with_no_events(
+                    object_address(&store),
+                    coin_to_fungible_asset(coin)
+                );
+            };
+
+            // Note:
+            // It is possible the primary fungible store may already exist before this function call.
+            // In this case, if the account owns a frozen CoinStore and an unfrozen primary fungible store, this
+            // function would convert and deposit the rest coin into the primary store and freeze it to make the
+            // `frozen` semantic as consistent as possible.
+            if (frozen != fungible_asset::is_frozen(store)) {
+                fungible_asset::set_frozen_flag_internal(store, frozen);
+            }
+        } else {
+            destroy_zero(coin);
+        };
+        event::destroy_handle(deposit_events);
+        event::destroy_handle(withdraw_events);
+    };
 }
 
@@ -2057,14 +2157,13 @@ only be called by Aptos Framework (0x1) account for now because of create_ - + -## Function `is_aggregatable_coin_zero` +## Function `assert_signer_has_permission` -Returns true if the value of aggregatable coin is zero. -
public(friend) fun is_aggregatable_coin_zero<CoinType>(coin: &coin::AggregatableCoin<CoinType>): bool
+
fun assert_signer_has_permission<CoinType>(account: &signer)
 
@@ -2073,9 +2172,17 @@ Returns true if the value of aggregatable coin is zero. Implementation -
public(friend) fun is_aggregatable_coin_zero<CoinType>(coin: &AggregatableCoin<CoinType>): bool {
-    let amount = aggregator::read(&coin.value);
-    amount == 0
+
inline fun assert_signer_has_permission<CoinType>(account: &signer) {
+    if(permissioned_signer::is_permissioned_signer(account)) {
+        fungible_asset::withdraw_permission_check_by_address(
+            account,
+            primary_fungible_store::primary_store_address(
+                signer::address_of(account),
+                ensure_paired_metadata<CoinType>()
+            ),
+            0
+        );
+    }
 }
 
@@ -2083,14 +2190,14 @@ Returns true if the value of aggregatable coin is zero. - + -## Function `drain_aggregatable_coin` +## Function `migrate_to_fungible_store` -Drains the aggregatable coin, setting it to zero and returning a standard coin. +Voluntarily migrate to fungible store for CoinType if not yet. -
public(friend) fun drain_aggregatable_coin<CoinType>(coin: &mut coin::AggregatableCoin<CoinType>): coin::Coin<CoinType>
+
public entry fun migrate_to_fungible_store<CoinType>(account: &signer)
 
@@ -2099,23 +2206,12 @@ Drains the aggregatable coin, setting it to zero and returning a standard coin. Implementation -
public(friend) fun drain_aggregatable_coin<CoinType>(coin: &mut AggregatableCoin<CoinType>): Coin<CoinType> {
-    spec {
-        // TODO: The data invariant is not properly assumed from CollectedFeesPerBlock.
-        assume aggregator::spec_get_limit(coin.value) == MAX_U64;
-    };
-    let amount = aggregator::read(&coin.value);
-    assert!(amount <= MAX_U64, error::out_of_range(EAGGREGATABLE_COIN_VALUE_TOO_LARGE));
-    spec {
-        update aggregate_supply<CoinType> = aggregate_supply<CoinType> - amount;
-    };
-    aggregator::sub(&mut coin.value, amount);
-    spec {
-        update supply<CoinType> = supply<CoinType> + amount;
-    };
-    Coin<CoinType> {
-        value: (amount as u64),
-    }
+
public entry fun migrate_to_fungible_store<CoinType>(
+    account: &signer
+) acquires CoinStore, CoinConversionMap, CoinInfo {
+    let account_addr = signer::address_of(account);
+    assert_signer_has_permission<CoinType>(account);
+    maybe_convert_to_fungible_store<CoinType>(account_addr);
 }
 
@@ -2123,14 +2219,14 @@ Drains the aggregatable coin, setting it to zero and returning a standard coin. - + -## Function `merge_aggregatable_coin` +## Function `migrate_coin_store_to_fungible_store` -Merges coin into aggregatable coin (dst_coin). +Migrate to fungible store for CoinType if not yet. -
public(friend) fun merge_aggregatable_coin<CoinType>(dst_coin: &mut coin::AggregatableCoin<CoinType>, coin: coin::Coin<CoinType>)
+
public entry fun migrate_coin_store_to_fungible_store<CoinType>(accounts: vector<address>)
 
@@ -2139,19 +2235,14 @@ Merges coin into aggregatable coin ( Implementation -
public(friend) fun merge_aggregatable_coin<CoinType>(
-    dst_coin: &mut AggregatableCoin<CoinType>,
-    coin: Coin<CoinType>
-) {
-    spec {
-        update supply<CoinType> = supply<CoinType> - coin.value;
-    };
-    let Coin { value } = coin;
-    let amount = (value as u128);
-    spec {
-        update aggregate_supply<CoinType> = aggregate_supply<CoinType> + amount;
-    };
-    aggregator::add(&mut dst_coin.value, amount);
+
public entry fun migrate_coin_store_to_fungible_store<CoinType>(
+    accounts: vector<address>
+) acquires CoinStore, CoinConversionMap, CoinInfo {
+    if (features::new_accounts_default_to_fa_store_enabled() || features::new_accounts_default_to_fa_apt_store_enabled()) {
+        std::vector::for_each(accounts, |account| {
+            maybe_convert_to_fungible_store<CoinType>(account);
+        });
+    }
 }
 
@@ -2159,14 +2250,14 @@ Merges coin into aggregatable coin ( - + -## Function `collect_into_aggregatable_coin` +## Function `coin_address` -Collects a specified amount of coin form an account into aggregatable coin. +A helper function that returns the address of CoinType. -
public(friend) fun collect_into_aggregatable_coin<CoinType>(account_addr: address, amount: u64, dst_coin: &mut coin::AggregatableCoin<CoinType>)
+
fun coin_address<CoinType>(): address
 
@@ -2175,35 +2266,9 @@ Collects a specified amount of coin form an account into aggregatable coin. Implementation -
public(friend) fun collect_into_aggregatable_coin<CoinType>(
-    account_addr: address,
-    amount: u64,
-    dst_coin: &mut AggregatableCoin<CoinType>,
-) acquires CoinStore, CoinConversionMap, CoinInfo, PairedCoinType {
-    // Skip collecting if amount is zero.
-    if (amount == 0) {
-        return
-    };
-
-    let (coin_amount_to_collect, fa_amount_to_collect) = calculate_amount_to_withdraw<CoinType>(
-        account_addr,
-        amount
-    );
-    let coin = if (coin_amount_to_collect > 0) {
-        let coin_store = borrow_global_mut<CoinStore<CoinType>>(account_addr);
-        extract(&mut coin_store.coin, coin_amount_to_collect)
-    } else {
-        zero()
-    };
-    if (fa_amount_to_collect > 0) {
-        let store_addr = primary_fungible_store::primary_store_address(
-            account_addr,
-            option::destroy_some(paired_metadata<CoinType>())
-        );
-        let fa = fungible_asset::withdraw_internal(store_addr, fa_amount_to_collect);
-        merge(&mut coin, fungible_asset_to_coin<CoinType>(fa));
-    };
-    merge_aggregatable_coin(dst_coin, coin);
+
fun coin_address<CoinType>(): address {
+    let type_info = type_info::type_of<CoinType>();
+    type_info::account_address(&type_info)
 }
 
@@ -2211,13 +2276,15 @@ Collects a specified amount of coin form an account into aggregatable coin. - + -## Function `calculate_amount_to_withdraw` +## Function `balance` +Returns the balance of owner for provided CoinType and its paired FA if exists. -
fun calculate_amount_to_withdraw<CoinType>(account_addr: address, amount: u64): (u64, u64)
+
#[view]
+public fun balance<CoinType>(owner: address): u64
 
@@ -2226,23 +2293,14 @@ Collects a specified amount of coin form an account into aggregatable coin. Implementation -
inline fun calculate_amount_to_withdraw<CoinType>(
-    account_addr: address,
-    amount: u64
-): (u64, u64) {
-    let coin_balance = coin_balance<CoinType>(account_addr);
-    if (coin_balance >= amount) {
-        (amount, 0)
-    } else {
-        let metadata = paired_metadata<CoinType>();
-        if (option::is_some(&metadata) && primary_fungible_store::primary_store_exists(
-            account_addr,
-            option::destroy_some(metadata)
-        ))
-            (coin_balance, amount - coin_balance)
-        else
-            abort error::invalid_argument(EINSUFFICIENT_BALANCE)
-    }
+
public fun balance<CoinType>(owner: address): u64 acquires CoinConversionMap, CoinStore {
+    let paired_metadata = paired_metadata<CoinType>();
+    coin_balance<CoinType>(owner) + if (option::is_some(&paired_metadata)) {
+        primary_fungible_store::balance(
+            owner,
+            option::extract(&mut paired_metadata)
+        )
+    } else { 0 }
 }
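Continuing the illustrative numbers from the withdrawal example above, the two halves are simply summed:

// balance<AptosCoin>(addr) = CoinStore balance (30) + paired primary store balance (20) = 50
// If CoinType has no paired metadata, only the CoinStore balance is returned.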
 
@@ -2250,13 +2308,15 @@ Collects a specified amount of coin form an account into aggregatable coin. - + -## Function `maybe_convert_to_fungible_store` +## Function `is_balance_at_least` +Returns whether the balance of owner for provided CoinType and its paired FA is >= amount. -
fun maybe_convert_to_fungible_store<CoinType>(account: address)
+
#[view]
+public fun is_balance_at_least<CoinType>(owner: address, amount: u64): bool
 
@@ -2265,160 +2325,10 @@ Collects a specified amount of coin form an account into aggregatable coin. Implementation -
fun maybe_convert_to_fungible_store<CoinType>(account: address) acquires CoinStore, CoinConversionMap, CoinInfo {
-    if (!features::coin_to_fungible_asset_migration_feature_enabled()) {
-        abort error::unavailable(ECOIN_TO_FUNGIBLE_ASSET_FEATURE_NOT_ENABLED)
-    };
-    assert!(is_coin_initialized<CoinType>(), error::invalid_argument(ECOIN_INFO_NOT_PUBLISHED));
-
-    let metadata = ensure_paired_metadata<CoinType>();
-    let store = primary_fungible_store::ensure_primary_store_exists(account, metadata);
-    let store_address = object::object_address(&store);
-    if (exists<CoinStore<CoinType>>(account)) {
-        let CoinStore<CoinType> { coin, frozen, deposit_events, withdraw_events } = move_from<CoinStore<CoinType>>(
-            account
-        );
-        event::emit(
-            CoinEventHandleDeletion {
-                event_handle_creation_address: guid::creator_address(
-                    event::guid(&deposit_events)
-                ),
-                deleted_deposit_event_handle_creation_number: guid::creation_num(event::guid(&deposit_events)),
-                deleted_withdraw_event_handle_creation_number: guid::creation_num(event::guid(&withdraw_events))
-            }
-        );
-        event::destroy_handle(deposit_events);
-        event::destroy_handle(withdraw_events);
-        if (coin.value == 0) {
-            destroy_zero(coin);
-        } else {
-            fungible_asset::deposit(store, coin_to_fungible_asset(coin));
-        };
-        // Note:
-        // It is possible the primary fungible store may already exist before this function call.
-        // In this case, if the account owns a frozen CoinStore and an unfrozen primary fungible store, this
-        // function would convert and deposit the rest coin into the primary store and freeze it to make the
-        // `frozen` semantic as consistent as possible.
-        if (frozen != fungible_asset::is_frozen(store)) {
-            fungible_asset::set_frozen_flag_internal(store, frozen);
-        }
-    };
-    if (!exists<MigrationFlag>(store_address)) {
-        move_to(&create_signer::create_signer(store_address), MigrationFlag {});
-    }
-}
-
- - - - - - - -## Function `migrate_to_fungible_store` - -Voluntarily migrate to fungible store for CoinType if not yet. - - -
public entry fun migrate_to_fungible_store<CoinType>(account: &signer)
-
- - - -
-Implementation - - -
public entry fun migrate_to_fungible_store<CoinType>(
-    account: &signer
-) acquires CoinStore, CoinConversionMap, CoinInfo {
-    maybe_convert_to_fungible_store<CoinType>(signer::address_of(account));
-}
-
- - - -
- - - -## Function `coin_address` - -A helper function that returns the address of CoinType. - - -
fun coin_address<CoinType>(): address
-
- - - -
-Implementation - - -
fun coin_address<CoinType>(): address {
-    let type_info = type_info::type_of<CoinType>();
-    type_info::account_address(&type_info)
-}
-
- - - -
- - - -## Function `balance` - -Returns the balance of owner for provided CoinType and its paired FA if exists. - - -
#[view]
-public fun balance<CoinType>(owner: address): u64
-
- - - -
-Implementation - - -
public fun balance<CoinType>(owner: address): u64 acquires CoinConversionMap, CoinStore {
-    let paired_metadata = paired_metadata<CoinType>();
-    coin_balance<CoinType>(owner) + if (option::is_some(&paired_metadata)) {
-        primary_fungible_store::balance(
-            owner,
-            option::extract(&mut paired_metadata)
-        )
-    } else { 0 }
-}
-
- - - -
- - - -## Function `is_balance_at_least` - -Returns whether the balance of owner for provided CoinType and its paired FA is >= amount. - - -
#[view]
-public fun is_balance_at_least<CoinType>(owner: address, amount: u64): bool
-
- - - -
-Implementation - - -
public fun is_balance_at_least<CoinType>(owner: address, amount: u64): bool acquires CoinConversionMap, CoinStore {
-    let coin_balance = coin_balance<CoinType>(owner);
-    if (coin_balance >= amount) {
-        return true
+
public fun is_balance_at_least<CoinType>(owner: address, amount: u64): bool acquires CoinConversionMap, CoinStore {
+    let coin_balance = coin_balance<CoinType>(owner);
+    if (coin_balance >= amount) {
+        return true
     };
 
     let paired_metadata = paired_metadata<CoinType>();
@@ -2510,7 +2420,7 @@ Returns true if account_addr has frozen the CoinStore or if
 
 
public fun is_coin_store_frozen<CoinType>(
     account_addr: address
-): bool acquires CoinStore, CoinConversionMap {
+): bool acquires CoinStore, CoinConversionMap, CoinInfo {
     if (!is_account_registered<CoinType>(account_addr)) {
         return true
     };
@@ -2541,15 +2451,13 @@ Returns true if account_addr is registered to r
 Implementation
 
 
-
public fun is_account_registered<CoinType>(account_addr: address): bool acquires CoinConversionMap {
+
public fun is_account_registered<CoinType>(account_addr: address): bool acquires CoinConversionMap, CoinInfo {
     assert!(is_coin_initialized<CoinType>(), error::invalid_argument(ECOIN_INFO_NOT_PUBLISHED));
     if (exists<CoinStore<CoinType>>(account_addr)) {
         true
     } else {
-        let paired_metadata_opt = paired_metadata<CoinType>();
-        (option::is_some(
-            &paired_metadata_opt
-        ) && migrated_primary_fungible_store_exists(account_addr, option::destroy_some(paired_metadata_opt)))
+        let paired_metadata = ensure_paired_metadata<CoinType>();
+        can_receive_paired_fungible_asset(account_addr, paired_metadata)
     }
 }
 
@@ -2784,6 +2692,54 @@ Note: This bypasses CoinStore::frozen -- coins within a frozen CoinStore can be +
+
+
+
+## Function `burn_from_for_gas`
+
+
+
public(friend) fun burn_from_for_gas<CoinType>(account_addr: address, amount: u64, burn_cap: &coin::BurnCapability<CoinType>)
+
+ + + +
+Implementation + + +
public(friend) fun burn_from_for_gas<CoinType>(
+    account_addr: address,
+    amount: u64,
+    burn_cap: &BurnCapability<CoinType>,
+) acquires CoinInfo, CoinStore, CoinConversionMap, PairedFungibleAssetRefs {
+    // Skip burning if amount is zero. This shouldn't error out as it's called as part of transaction fee burning.
+    if (amount == 0) {
+        return
+    };
+
+    let (coin_amount_to_burn, fa_amount_to_burn) = calculate_amount_to_withdraw<CoinType>(
+        account_addr,
+        amount
+    );
+    if (coin_amount_to_burn > 0) {
+        let coin_store = borrow_global_mut<CoinStore<CoinType>>(account_addr);
+        let coin_to_burn = extract(&mut coin_store.coin, coin_amount_to_burn);
+        burn(coin_to_burn, burn_cap);
+    };
+    if (fa_amount_to_burn > 0) {
+        fungible_asset::address_burn_from_for_gas(
+            borrow_paired_burn_ref(burn_cap),
+            primary_fungible_store::primary_store_address(account_addr, option::destroy_some(paired_metadata<CoinType>())),
+            fa_amount_to_burn
+        );
+    };
+}
+
+ + +
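The same coin/FA split drives gas-fee burning (illustrative numbers again):

// burn_from_for_gas<AptosCoin>(addr, 50, &burn_cap)
//   (coin_amount_to_burn, fa_amount_to_burn) = (30, 20)
//   30 is extracted from the CoinStore and burned via burn(),
//   20 is burned from the paired primary store via address_burn_from_for_gas.
// burn_from_for_gas<AptosCoin>(addr, 0, &burn_cap) returns immediately.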
@@ -2812,22 +2768,14 @@ Deposit the coin balance into the recipient's account and emit an event. !coin_store.frozen, error::permission_denied(EFROZEN), ); - if (std::features::module_event_migration_enabled()) { - event::emit( - CoinDeposit { coin_type: type_name<CoinType>(), account: account_addr, amount: coin.value } + event::emit_event<DepositEvent>( + &mut coin_store.deposit_events, + DepositEvent { amount: coin.value }, ); - }; - event::emit_event<DepositEvent>( - &mut coin_store.deposit_events, - DepositEvent { amount: coin.value }, - ); merge(&mut coin_store.coin, coin); } else { - let metadata = paired_metadata<CoinType>(); - if (option::is_some(&metadata) && migrated_primary_fungible_store_exists( - account_addr, - option::destroy_some(metadata) - )) { + let metadata = ensure_paired_metadata<CoinType>(); + if (can_receive_paired_fungible_asset( account_addr, metadata)) { primary_fungible_store::deposit(account_addr, coin_to_fungible_asset(coin)); } else { abort error::not_found(ECOIN_STORE_NOT_PUBLISHED) @@ -2840,13 +2788,50 @@ Deposit the coin balance into the recipient's account and emit an event. - + + +## Function `deposit_with_signer` + + + +
public fun deposit_with_signer<CoinType>(account: &signer, coin: coin::Coin<CoinType>)
+
+ + + +
+Implementation + + +
public fun deposit_with_signer<CoinType>(
+    account: &signer,
+    coin: Coin<CoinType>
+) acquires CoinStore, CoinConversionMap, CoinInfo {
+    let metadata = ensure_paired_metadata<CoinType>();
+    let account_address = signer::address_of(account);
+    fungible_asset::refill_permission(
+        account,
+        coin.value,
+        primary_fungible_store::primary_store_address_inlined(
+            account_address,
+            metadata,
+        )
+    );
+    deposit(account_address, coin);
+}
+
+ + + +
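This new function carries no doc comment, so one reading of it (an assumption on my part, not stated in the diff): it is the deposit path for permissioned signers, where refill_permission credits the signer's allowance on its own primary store by the deposited amount before the ordinary deposit runs. A hypothetical call site:

// `permissioned` is a permissioned signer depositing coins it already holds.
coin::deposit_with_signer<AptosCoin>(permissioned, coins);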
+ + -## Function `migrated_primary_fungible_store_exists` +## Function `can_receive_paired_fungible_asset` -
fun migrated_primary_fungible_store_exists(account_address: address, metadata: object::Object<fungible_asset::Metadata>): bool
+
fun can_receive_paired_fungible_asset(account_address: address, metadata: object::Object<fungible_asset::Metadata>): bool
 
@@ -2855,15 +2840,17 @@ Deposit the coin balance into the recipient's account and emit an event. Implementation -
inline fun migrated_primary_fungible_store_exists(
+
inline fun can_receive_paired_fungible_asset(
     account_address: address,
     metadata: Object<Metadata>
 ): bool {
-    let primary_store_address = primary_fungible_store::primary_store_address<Metadata>(account_address, metadata);
-    fungible_asset::store_exists(primary_store_address) && (
-        // migration flag is needed, until we start defaulting new accounts to APT PFS
-        features::new_accounts_default_to_fa_apt_store_enabled() || exists<MigrationFlag>(primary_store_address)
-    )
+    features::new_accounts_default_to_fa_store_enabled() || (features::new_accounts_default_to_fa_apt_store_enabled() && object::object_address(&metadata) == @0xa) || {
+        let primary_store_address = primary_fungible_store::primary_store_address<Metadata>(
+            account_address,
+            metadata
+        );
+        fungible_asset::store_exists(primary_store_address)
+    }
 }
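To make the condition above concrete (a restatement of the code, not new behavior):

// can_receive_paired_fungible_asset(addr, metadata) is true when any of:
//   1. features::new_accounts_default_to_fa_store_enabled()                          -- all accounts accept FA
//   2. features::new_accounts_default_to_fa_apt_store_enabled() && metadata at @0xa  -- APT only
//   3. the primary fungible store for (addr, metadata) already exists
// In deposit(), if this is false and no CoinStore exists, the call aborts with
// ECOIN_STORE_NOT_PUBLISHED.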
 
@@ -2871,15 +2858,15 @@ Deposit the coin balance into the recipient's account and emit an event. - + -## Function `force_deposit` +## Function `deposit_for_gas_fee` Deposit the coin balance into the recipient's account without checking if the account is frozen. This is for internal use only and doesn't emit an DepositEvent. -
public(friend) fun force_deposit<CoinType>(account_addr: address, coin: coin::Coin<CoinType>)
+
public(friend) fun deposit_for_gas_fee<CoinType>(account_addr: address, coin: coin::Coin<CoinType>)
 
@@ -2888,7 +2875,7 @@ This is for internal use only and doesn't emit an DepositEvent. Implementation -
public(friend) fun force_deposit<CoinType>(
+
public(friend) fun deposit_for_gas_fee<CoinType>(
     account_addr: address,
     coin: Coin<CoinType>
 ) acquires CoinStore, CoinConversionMap, CoinInfo {
@@ -2896,15 +2883,15 @@ This is for internal use only and doesn't emit an DepositEvent.
         let coin_store = borrow_global_mut<CoinStore<CoinType>>(account_addr);
         merge(&mut coin_store.coin, coin);
     } else {
-        let metadata = paired_metadata<CoinType>();
-        if (option::is_some(&metadata) && migrated_primary_fungible_store_exists(
+        let metadata = ensure_paired_metadata<CoinType>();
+        if (can_receive_paired_fungible_asset(
             account_addr,
-            option::destroy_some(metadata)
+            metadata
         )) {
             let fa = coin_to_fungible_asset(coin);
             let metadata = fungible_asset::asset_metadata(&fa);
-            let store = primary_fungible_store::primary_store(account_addr, metadata);
-            fungible_asset::deposit_internal(object::object_address(&store), fa);
+            let store = primary_fungible_store::ensure_primary_store_exists(account_addr, metadata);
+            fungible_asset::unchecked_deposit_with_no_events(object::object_address(&store), fa);
         } else {
             abort error::not_found(ECOIN_STORE_NOT_PUBLISHED)
         }
@@ -3081,7 +3068,7 @@ Upgrade total supply to use a parallelizable implementation if it is
 available.
 
 
-
public entry fun upgrade_supply<CoinType>(account: &signer)
+
public entry fun upgrade_supply<CoinType>(_account: &signer)
 
@@ -3090,30 +3077,8 @@ available. Implementation -
public entry fun upgrade_supply<CoinType>(account: &signer) acquires CoinInfo, SupplyConfig {
-    let account_addr = signer::address_of(account);
-
-    // Only coin creators can upgrade total supply.
-    assert!(
-        coin_address<CoinType>() == account_addr,
-        error::invalid_argument(ECOIN_INFO_ADDRESS_MISMATCH),
-    );
-
-    // Can only succeed once on-chain governance agreed on the upgrade.
-    assert!(
-        borrow_global_mut<SupplyConfig>(@aptos_framework).allow_upgrades,
-        error::permission_denied(ECOIN_SUPPLY_UPGRADE_NOT_SUPPORTED)
-    );
-
-    let maybe_supply = &mut borrow_global_mut<CoinInfo<CoinType>>(account_addr).supply;
-    if (option::is_some(maybe_supply)) {
-        let supply = option::borrow_mut(maybe_supply);
-
-        // If supply is tracked and the current implementation uses an integer - upgrade.
-        if (!optional_aggregator::is_parallelizable(supply)) {
-            optional_aggregator::switch(supply);
-        }
-    }
+
public entry fun upgrade_supply<CoinType>(_account: &signer) {
+    abort error::invalid_state(ECOIN_SUPPLY_UPGRADE_NOT_SUPPORTED)
 }
 
@@ -3145,7 +3110,7 @@ The given signer also becomes the account hosting the information about the coi symbol: string::String, decimals: u8, monitor_supply: bool, -): (BurnCapability<CoinType>, FreezeCapability<CoinType>, MintCapability<CoinType>) { +): (BurnCapability<CoinType>, FreezeCapability<CoinType>, MintCapability<CoinType>) acquires CoinInfo, CoinConversionMap { initialize_internal(account, name, symbol, decimals, monitor_supply, false) }
@@ -3176,7 +3141,7 @@ Same as initialize but supply can be initialized to parallelizable symbol: string::String, decimals: u8, monitor_supply: bool, -): (BurnCapability<CoinType>, FreezeCapability<CoinType>, MintCapability<CoinType>) { +): (BurnCapability<CoinType>, FreezeCapability<CoinType>, MintCapability<CoinType>) acquires CoinInfo, CoinConversionMap { system_addresses::assert_aptos_framework(account); initialize_internal(account, name, symbol, decimals, monitor_supply, true) } @@ -3208,8 +3173,9 @@ Same as initialize but supply can be initialized to parallelizable decimals: u8, monitor_supply: bool, parallelizable: bool, -): (BurnCapability<CoinType>, FreezeCapability<CoinType>, MintCapability<CoinType>) { +): (BurnCapability<CoinType>, FreezeCapability<CoinType>, MintCapability<CoinType>) acquires CoinInfo, CoinConversionMap { let account_addr = signer::address_of(account); + assert_signer_has_permission<CoinType>(account); assert!( coin_address<CoinType>() == account_addr, @@ -3223,6 +3189,7 @@ Same as initialize but supply can be initialized to parallelizable assert!(string::length(&name) <= MAX_COIN_NAME_LENGTH, error::invalid_argument(ECOIN_NAME_TOO_LONG)); assert!(string::length(&symbol) <= MAX_COIN_SYMBOL_LENGTH, error::invalid_argument(ECOIN_SYMBOL_TOO_LONG)); + assert!(decimals <= MAX_DECIMALS, error::invalid_argument(ECOIN_DECIMALS_TOO_LARGE)); let coin_info = CoinInfo<CoinType> { name, @@ -3230,7 +3197,7 @@ Same as initialize but supply can be initialized to parallelizable decimals, supply: if (monitor_supply) { option::some( - optional_aggregator::new(MAX_U128, parallelizable) + optional_aggregator::new(parallelizable) ) } else { option::none() }, }; @@ -3325,8 +3292,9 @@ Returns minted Coin. Implementation -
public fun register<CoinType>(account: &signer) acquires CoinConversionMap {
+
public fun register<CoinType>(account: &signer) acquires CoinInfo, CoinConversionMap {
     let account_addr = signer::address_of(account);
+    assert_signer_has_permission<CoinType>(account);
     // Short-circuit and do nothing if account is already registered for CoinType.
     if (is_account_registered<CoinType>(account_addr)) {
         return
@@ -3400,71 +3368,6 @@ Returns the value passed in coin
-
-
-
-## Function `withdraw_from`
-
-Withdraws a specifed amount of coin CoinType from the specified account.
-@param account The account from which to withdraw the coin.
-@param amount The amount of coin to withdraw.
-
-
-
public(friend) fun withdraw_from<CoinType>(account_addr: address, amount: u64): coin::Coin<CoinType>
-
- - - -
-Implementation - - -
public(friend) fun withdraw_from<CoinType>(
-    account_addr: address,
-    amount: u64
-): Coin<CoinType> acquires CoinStore, CoinConversionMap, CoinInfo, PairedCoinType {
-
-    let (coin_amount_to_withdraw, fa_amount_to_withdraw) = calculate_amount_to_withdraw<CoinType>(
-        account_addr,
-        amount
-    );
-    let withdrawn_coin = if (coin_amount_to_withdraw > 0) {
-        let coin_store = borrow_global_mut<CoinStore<CoinType>>(account_addr);
-        assert!(
-            !coin_store.frozen,
-            error::permission_denied(EFROZEN),
-        );
-        if (std::features::module_event_migration_enabled()) {
-            event::emit(
-                CoinWithdraw {
-                    coin_type: type_name<CoinType>(), account: account_addr, amount: coin_amount_to_withdraw
-                }
-            );
-        };
-        event::emit_event<WithdrawEvent>(
-            &mut coin_store.withdraw_events,
-            WithdrawEvent { amount: coin_amount_to_withdraw },
-        );
-        extract(&mut coin_store.coin, coin_amount_to_withdraw)
-    } else {
-        zero()
-    };
-    if (fa_amount_to_withdraw > 0) {
-        let store_addr = primary_fungible_store::primary_store_address(
-            account_addr,
-            option::destroy_some(paired_metadata<CoinType>())
-        );
-        let fa = fungible_asset::withdraw_internal(store_addr, fa_amount_to_withdraw);
-        merge(&mut withdrawn_coin, fungible_asset_to_coin<CoinType>(fa));
-    };
-
-    withdrawn_coin
-}
-
- - -
@@ -3494,18 +3397,22 @@ Withdraw specified amount of coin CoinType from the si amount ); let withdrawn_coin = if (coin_amount_to_withdraw > 0) { + let metadata = ensure_paired_metadata<CoinType>(); + if(permissioned_signer::is_permissioned_signer(account)) { + // Perform the check only if the account is a permissioned signer to save the cost of + // computing the primary store location. + fungible_asset::withdraw_permission_check_by_address( + account, + primary_fungible_store::primary_store_address(account_addr, metadata), + coin_amount_to_withdraw + ); + }; + let coin_store = borrow_global_mut<CoinStore<CoinType>>(account_addr); assert!( !coin_store.frozen, error::permission_denied(EFROZEN), ); - if (std::features::module_event_migration_enabled()) { - event::emit( - CoinWithdraw { - coin_type: type_name<CoinType>(), account: account_addr, amount: coin_amount_to_withdraw - } - ); - }; event::emit_event<WithdrawEvent>( &mut coin_store.withdraw_events, WithdrawEvent { amount: coin_amount_to_withdraw }, @@ -3817,6 +3724,7 @@ Destroy a burn capability.
pragma verify = true;
+pragma aborts_if_is_partial;
 
 global supply<CoinType>: num;
 
@@ -3885,7 +3793,8 @@ initialize, initialize_internal, initialize_with_parallelizable_supply;
 ### Struct `AggregatableCoin`
 
 
-
struct AggregatableCoin<CoinType> has store
+
#[deprecated]
+struct AggregatableCoin<CoinType> has store
 
@@ -3940,165 +3849,51 @@ initialize, initialize_internal, initialize_with_parallelizable_supply; - - -### Function `initialize_supply_config` - - -
public(friend) fun initialize_supply_config(aptos_framework: &signer)
-
- - -Can only be initialized once. -Can only be published by reserved addresses. - - -
let aptos_addr = signer::address_of(aptos_framework);
-aborts_if !system_addresses::is_aptos_framework_address(aptos_addr);
-aborts_if exists<SupplyConfig>(aptos_addr);
-ensures !global<SupplyConfig>(aptos_addr).allow_upgrades;
-ensures exists<SupplyConfig>(aptos_addr);
-
- - - ### Function `allow_supply_upgrades` -
public fun allow_supply_upgrades(aptos_framework: &signer, allowed: bool)
+
public fun allow_supply_upgrades(_aptos_framework: &signer, _allowed: bool)
 
Can only be updated by @aptos_framework. -
modifies global<SupplyConfig>(@aptos_framework);
-let aptos_addr = signer::address_of(aptos_framework);
-aborts_if !system_addresses::is_aptos_framework_address(aptos_addr);
-aborts_if !exists<SupplyConfig>(aptos_addr);
-let post allow_upgrades_post = global<SupplyConfig>(@aptos_framework);
-ensures allow_upgrades_post.allow_upgrades == allowed;
+
aborts_if true;
 
- - -### Function `initialize_aggregatable_coin` - - -
public(friend) fun initialize_aggregatable_coin<CoinType>(aptos_framework: &signer): coin::AggregatableCoin<CoinType>
-
- - - - -
include system_addresses::AbortsIfNotAptosFramework { account: aptos_framework };
-include aggregator_factory::CreateAggregatorInternalAbortsIf;
-
- - - - - -### Function `is_aggregatable_coin_zero` - - -
public(friend) fun is_aggregatable_coin_zero<CoinType>(coin: &coin::AggregatableCoin<CoinType>): bool
-
- - - - -
aborts_if false;
-ensures result == (aggregator::spec_read(coin.value) == 0);
-
- - - - - -### Function `drain_aggregatable_coin` - - -
public(friend) fun drain_aggregatable_coin<CoinType>(coin: &mut coin::AggregatableCoin<CoinType>): coin::Coin<CoinType>
-
- - - - -
aborts_if aggregator::spec_read(coin.value) > MAX_U64;
-ensures result.value == aggregator::spec_aggregator_get_val(old(coin).value);
-
- - - - - -### Function `merge_aggregatable_coin` - - -
public(friend) fun merge_aggregatable_coin<CoinType>(dst_coin: &mut coin::AggregatableCoin<CoinType>, coin: coin::Coin<CoinType>)
-
- - - - -
let aggr = dst_coin.value;
-let post p_aggr = dst_coin.value;
-aborts_if aggregator::spec_aggregator_get_val(aggr)
-    + coin.value > aggregator::spec_get_limit(aggr);
-aborts_if aggregator::spec_aggregator_get_val(aggr)
-    + coin.value > MAX_U128;
-ensures aggregator::spec_aggregator_get_val(aggr) + coin.value == aggregator::spec_aggregator_get_val(p_aggr);
-
- - - - + -### Function `collect_into_aggregatable_coin` +### Function `maybe_convert_to_fungible_store` -
public(friend) fun collect_into_aggregatable_coin<CoinType>(account_addr: address, amount: u64, dst_coin: &mut coin::AggregatableCoin<CoinType>)
+
fun maybe_convert_to_fungible_store<CoinType>(account: address)
 
pragma verify = false;
-let aggr = dst_coin.value;
-let post p_aggr = dst_coin.value;
-let coin_store = global<CoinStore<CoinType>>(account_addr);
-let post p_coin_store = global<CoinStore<CoinType>>(account_addr);
-aborts_if amount > 0 && !exists<CoinStore<CoinType>>(account_addr);
-aborts_if amount > 0 && coin_store.coin.value < amount;
-aborts_if amount > 0 && aggregator::spec_aggregator_get_val(aggr)
-    + amount > aggregator::spec_get_limit(aggr);
-aborts_if amount > 0 && aggregator::spec_aggregator_get_val(aggr)
-    + amount > MAX_U128;
-ensures aggregator::spec_aggregator_get_val(aggr) + amount == aggregator::spec_aggregator_get_val(p_aggr);
-ensures coin_store.coin.value - amount == p_coin_store.coin.value;
+modifies global<CoinInfo<CoinType>>(account);
+modifies global<CoinStore<CoinType>>(account);
 
- -### Function `maybe_convert_to_fungible_store` + -
fun maybe_convert_to_fungible_store<CoinType>(account: address)
-
- - - - -
pragma verify = false;
-modifies global<CoinInfo<CoinType>>(account);
-modifies global<CoinStore<CoinType>>(account);
+
schema DepositAbortsIf<CoinType> {
+    account_addr: address;
+    let coin_store = global<CoinStore<CoinType>>(account_addr);
+    aborts_if !exists<CoinStore<CoinType>>(account_addr);
+    aborts_if coin_store.frozen;
+}
 
@@ -4213,12 +4008,15 @@ Get address by reflection. -
fun spec_is_account_registered<CoinType>(account_addr: address): bool {
-   let paired_metadata_opt = spec_paired_metadata<CoinType>();
-   exists<CoinStore<CoinType>>(account_addr) || (option::spec_is_some(
-       paired_metadata_opt
-   ) && primary_fungible_store::spec_primary_store_exists(account_addr, option::spec_borrow(paired_metadata_opt)))
-}
+
fun spec_is_account_registered<CoinType>(account_addr:address): bool;
+
+ + + + +
pragma aborts_if_is_partial;
+aborts_if false;
+ensures [abstract] result == spec_is_account_registered<CoinType>(account_addr);
 
@@ -4449,12 +4247,12 @@ Get address by reflection. - + -### Function `force_deposit` +### Function `deposit_for_gas_fee` -
public(friend) fun force_deposit<CoinType>(account_addr: address, coin: coin::Coin<CoinType>)
+
public(friend) fun deposit_for_gas_fee<CoinType>(account_addr: address, coin: coin::Coin<CoinType>)
 
@@ -4571,7 +4369,7 @@ The value of zero_coin must be 0. ### Function `upgrade_supply` -
public entry fun upgrade_supply<CoinType>(account: &signer)
+
public entry fun upgrade_supply<CoinType>(_account: &signer)
 
@@ -4579,26 +4377,7 @@ The creator of CoinType must be @aptos_framework. SupplyConfig allow upgrade. -
let account_addr = signer::address_of(account);
-let coin_address = type_info::type_of<CoinType>().account_address;
-aborts_if coin_address != account_addr;
-aborts_if !exists<SupplyConfig>(@aptos_framework);
-// This enforces high-level requirement 1:
-aborts_if !exists<CoinInfo<CoinType>>(account_addr);
-let supply_config = global<SupplyConfig>(@aptos_framework);
-aborts_if !supply_config.allow_upgrades;
-modifies global<CoinInfo<CoinType>>(account_addr);
-let maybe_supply = global<CoinInfo<CoinType>>(account_addr).supply;
-let supply = option::spec_borrow(maybe_supply);
-let value = optional_aggregator::optional_aggregator_value(supply);
-let post post_maybe_supply = global<CoinInfo<CoinType>>(account_addr).supply;
-let post post_supply = option::spec_borrow(post_maybe_supply);
-let post post_value = optional_aggregator::optional_aggregator_value(post_supply);
-let supply_no_parallel = option::spec_is_some(maybe_supply) &&
-    !optional_aggregator::is_parallelizable(supply);
-aborts_if supply_no_parallel && !exists<aggregator_factory::AggregatorFactory>(@aptos_framework);
-ensures supply_no_parallel ==>
-    optional_aggregator::is_parallelizable(post_supply) && post_value == value;
+
aborts_if true;
 
@@ -4647,6 +4426,27 @@ The creator of CoinType must be @aptos_framework.
+Make sure name and symbol are legal length. +Only the creator of CoinType can initialize. + + + + + +
schema InitializeInternalSchema<CoinType> {
+    account: signer;
+    name: vector<u8>;
+    symbol: vector<u8>;
+    let account_addr = signer::address_of(account);
+    let coin_address = type_info::type_of<CoinType>().account_address;
+    aborts_if coin_address != account_addr;
+    aborts_if exists<CoinInfo<CoinType>>(account_addr);
+    aborts_if len(name) > MAX_COIN_NAME_LENGTH;
+    aborts_if len(symbol) > MAX_COIN_SYMBOL_LENGTH;
+}
+
+ + @@ -4677,7 +4477,7 @@ The creator of CoinType must be @aptos_framework. && coin_info.symbol == symbol && coin_info.decimals == decimals; ensures if (monitor_supply) { - value == 0 && limit == MAX_U128 + value == 0 && limit == MAX_U128 && (parallelizable == optional_aggregator::is_parallelizable(supply)) } else { option::spec_is_none(coin_info.supply) diff --git a/aptos-move/framework/aptos-framework/doc/common_account_abstractions_utils.md b/aptos-move/framework/aptos-framework/doc/common_account_abstractions_utils.md new file mode 100644 index 0000000000000..efd6e85e2154c --- /dev/null +++ b/aptos-move/framework/aptos-framework/doc/common_account_abstractions_utils.md @@ -0,0 +1,96 @@ + + + +# Module `0x1::common_account_abstractions_utils` + + + +- [Function `network_name`](#0x1_common_account_abstractions_utils_network_name) +- [Function `entry_function_name`](#0x1_common_account_abstractions_utils_entry_function_name) + + +
use 0x1::chain_id;
+use 0x1::string;
+use 0x1::string_utils;
+use 0x1::transaction_context;
+use 0x1::vector;
+
+ + + + + +## Function `network_name` + + + +
public(friend) fun network_name(): vector<u8>
+
+ + + +
+Implementation + + +
public(friend) fun network_name(): vector<u8> {
+    let chain_id = chain_id::get();
+    if (chain_id == 1) {
+        b"mainnet"
+    } else if (chain_id == 2) {
+        b"testnet"
+    } else if (chain_id == 4) {
+        b"local"
+    } else {
+        let network_name = &mut vector[];
+        network_name.append(b"custom network: ");
+        network_name.append(*string_utils::to_string(&chain_id).bytes());
+        *network_name
+    }
+}
+
+ + + +
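A few concrete outputs of this mapping (chain id 42 is an arbitrary example):

// chain_id 1  => b"mainnet"
// chain_id 2  => b"testnet"
// chain_id 4  => b"local"
// chain_id 42 => b"custom network: 42"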
+ + + +## Function `entry_function_name` + + + +
public(friend) fun entry_function_name(entry_function_payload: &transaction_context::EntryFunctionPayload): vector<u8>
+
+ + + +
+Implementation + + +
public(friend) fun entry_function_name(entry_function_payload: &EntryFunctionPayload): vector<u8> {
+    let entry_function_name = &mut vector[];
+    let addr_str = string_utils::to_string(
+        &transaction_context::account_address(entry_function_payload)
+    ).bytes();
+    // .slice(1) to remove the leading '@' char
+    entry_function_name.append(addr_str.slice(1, addr_str.length()));
+    entry_function_name.append(b"::");
+    entry_function_name.append(
+        *transaction_context::module_name(entry_function_payload).bytes()
+    );
+    entry_function_name.append(b"::");
+    entry_function_name.append(
+        *transaction_context::function_name(entry_function_payload).bytes()
+    );
+    *entry_function_name
+}
+
+ + + +
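For example, a payload targeting the coin transfer entry function would yield roughly the following (the exact address rendering depends on string_utils::to_string; the leading '@' is sliced off as noted in the code):

// account address "@0x1" -> "0x1", module "coin", function "transfer"
// result: b"0x1::coin::transfer"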
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/aptos-framework/doc/common_domain_aa_auths.md b/aptos-move/framework/aptos-framework/doc/common_domain_aa_auths.md new file mode 100644 index 0000000000000..93b9d688b0284 --- /dev/null +++ b/aptos-move/framework/aptos-framework/doc/common_domain_aa_auths.md @@ -0,0 +1,73 @@ + + + +# Module `0x1::common_domain_aa_auths` + + + +- [Constants](#@Constants_0) +- [Function `authenticate_ed25519_hex`](#0x1_common_domain_aa_auths_authenticate_ed25519_hex) + + +
use 0x1::auth_data;
+use 0x1::ed25519;
+use 0x1::error;
+use 0x1::string;
+use 0x1::string_utils;
+
+ + + + + +## Constants + + + + + + +
const EINVALID_SIGNATURE: u64 = 1;
+
+ + + + + +## Function `authenticate_ed25519_hex` + + + +
public fun authenticate_ed25519_hex(account: signer, aa_auth_data: auth_data::AbstractionAuthData): signer
+
+ + + +
+Implementation + + +
public fun authenticate_ed25519_hex(account: signer, aa_auth_data: AbstractionAuthData): signer {
+    let hex_digest = string_utils::to_string(aa_auth_data.digest());
+
+    let public_key = new_unvalidated_public_key_from_bytes(*aa_auth_data.domain_account_identity());
+    let signature = new_signature_from_bytes(*aa_auth_data.domain_authenticator());
+    assert!(
+        ed25519::signature_verify_strict(
+            &signature,
+            &public_key,
+            *hex_digest.bytes(),
+        ),
+        error::permission_denied(EINVALID_SIGNATURE)
+    );
+
+    account
+}
+
+ + + +
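Worth spelling out what is verified here (my reading of the code above, with standard Ed25519 sizes assumed): the signature must cover the printable hex string of the transaction digest, not the raw digest bytes.

// domain_account_identity = 32-byte Ed25519 public key bytes
// domain_authenticator    = 64-byte Ed25519 signature over b"0x..." (the digest rendered as a hex string)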
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/aptos-framework/doc/config_buffer.md b/aptos-move/framework/aptos-framework/doc/config_buffer.md index a4ee3dd11301d..232b2b6a659ae 100644 --- a/aptos-move/framework/aptos-framework/doc/config_buffer.md +++ b/aptos-move/framework/aptos-framework/doc/config_buffer.md @@ -22,13 +22,15 @@ NOTE: on-chain config 0x1::state::ValidatorSet implemented its own - [Function `does_exist`](#0x1_config_buffer_does_exist) - [Function `upsert`](#0x1_config_buffer_upsert) - [Function `extract`](#0x1_config_buffer_extract) +- [Function `extract_v2`](#0x1_config_buffer_extract_v2) - [Specification](#@Specification_1) - [Function `does_exist`](#@Specification_1_does_exist) - [Function `upsert`](#@Specification_1_upsert) - - [Function `extract`](#@Specification_1_extract) + - [Function `extract_v2`](#@Specification_1_extract_v2)
use 0x1::any;
+use 0x1::error;
 use 0x1::option;
 use 0x1::simple_map;
 use 0x1::string;
@@ -70,6 +72,16 @@ NOTE: on-chain config 0x1::state::ValidatorSet implemented its own
 ## Constants
 
 
+
+
+Function is deprecated.
+
+
+
const EDEPRECATED: u64 = 2;
+
+ + + Config buffer operations failed with permission denied. @@ -173,13 +185,39 @@ Typically used in X::set_for_next_epoch() where X is an on-chain co ## Function `extract` +Use extract_v2 instead. + + +
#[deprecated]
+public fun extract<T: store>(): T
+
+ + + +
+Implementation + + +
public fun extract<T: store>(): T {
+    abort(error::unavailable(EDEPRECATED))
+}
+
+ + + +
+ + + +## Function `extract_v2` + Take the buffered config T out (buffer cleared). Abort if the buffer is empty. Should only be used at the end of a reconfiguration. Typically used in X::on_new_epoch() where X is an on-chain config. -
public fun extract<T: store>(): T
+
public(friend) fun extract_v2<T: store>(): T
 
@@ -188,7 +226,7 @@ Typically used in X::on_new_epoch() where X is an on-chaon config. Implementation -
public fun extract<T: store>(): T acquires PendingConfigs {
+
public(friend) fun extract_v2<T: store>(): T acquires PendingConfigs {
     let configs = borrow_global_mut<PendingConfigs>(@aptos_framework);
     let key = type_info::type_name<T>();
     let (_, value_packed) = simple_map::remove(&mut configs.configs, &key);
@@ -261,12 +299,12 @@ Typically used in X::on_new_epoch() where X is an on-chaon config.
 
 
 
-
+
 
-### Function `extract`
+### Function `extract_v2`
 
 
-
public fun extract<T: store>(): T
+
public(friend) fun extract_v2<T: store>(): T
 
@@ -287,7 +325,7 @@ Typically used in X::on_new_epoch() where X is an on-chaon config. let key = type_info::type_name<T>(); aborts_if !simple_map::spec_contains_key(configs.configs, key); include any::UnpackAbortsIf<T> { - x: simple_map::spec_get(configs.configs, key) + self: simple_map::spec_get(configs.configs, key) }; }
@@ -318,7 +356,7 @@ Typically used in X::on_new_epoch() where X is an on-chaon config. let type_name = type_info::type_name<T>(); let configs = global<PendingConfigs>(@aptos_framework); include spec_fun_does_exist<T>(type_name) ==> any::UnpackAbortsIf<T> { - x: simple_map::spec_get(configs.configs, type_name) + self: simple_map::spec_get(configs.configs, type_name) }; }
@@ -333,7 +371,7 @@ Typically used in X::on_new_epoch() where X is an on-chaon config. let type_name = type_info::type_name<T>(); let configs = global<PendingConfigs>(@aptos_framework); include spec_fun_does_exist<T>(type_name) ==> any::UnpackRequirement<T> { - x: simple_map::spec_get(configs.configs, type_name) + self: simple_map::spec_get(configs.configs, type_name) }; }
diff --git a/aptos-move/framework/aptos-framework/doc/consensus_config.md b/aptos-move/framework/aptos-framework/doc/consensus_config.md index ddceb292dbded..9a4e01694a8df 100644 --- a/aptos-move/framework/aptos-framework/doc/consensus_config.md +++ b/aptos-move/framework/aptos-framework/doc/consensus_config.md @@ -192,7 +192,7 @@ Only used in reconfigurations to apply the pending on_new_epoch(framework: &signer) acquires ConsensusConfig { system_addresses::assert_aptos_framework(framework); if (config_buffer::does_exist<ConsensusConfig>()) { - let new_config = config_buffer::extract<ConsensusConfig>(); + let new_config = config_buffer::extract_v2<ConsensusConfig>(); if (exists<ConsensusConfig>(@aptos_framework)) { *borrow_global_mut<ConsensusConfig>(@aptos_framework) = new_config; } else { @@ -348,7 +348,6 @@ When setting now time must be later than last_reconfiguration_time.
pragma verify_duration_estimate = 600;
-include transaction_fee::RequiresCollectedFeesPerValueLeqBlockAptosSupply;
 include staking_config::StakingRewardsConfigRequirement;
 let addr = signer::address_of(account);
 // This enforces high-level requirement 2:
@@ -358,7 +357,6 @@ When setting now time must be later than last_reconfiguration_time.
 aborts_if !(len(config) > 0);
 requires chain_status::is_genesis();
 requires timestamp::spec_now_microseconds() >= reconfiguration::last_reconfiguration_time();
-requires exists<stake::ValidatorFees>(@aptos_framework);
 requires exists<CoinInfo<AptosCoin>>(@aptos_framework);
 ensures global<ConsensusConfig>(@aptos_framework).config == config;
 
diff --git a/aptos-move/framework/aptos-framework/doc/create_signer.md b/aptos-move/framework/aptos-framework/doc/create_signer.md index a66df38b9c601..5c15eb6bec51f 100644 --- a/aptos-move/framework/aptos-framework/doc/create_signer.md +++ b/aptos-move/framework/aptos-framework/doc/create_signer.md @@ -127,6 +127,16 @@ Convert address to singer and return.
pragma opaque;
 aborts_if [abstract] false;
 ensures [abstract] signer::address_of(result) == addr;
+ensures [abstract] result == spec_create_signer(addr);
+
+ + + + + + + +
fun spec_create_signer(addr: address): signer;
 
diff --git a/aptos-move/framework/aptos-framework/doc/delegation_pool.md b/aptos-move/framework/aptos-framework/doc/delegation_pool.md index 9ce29ab22430e..4a149e51e3705 100644 --- a/aptos-move/framework/aptos-framework/doc/delegation_pool.md +++ b/aptos-move/framework/aptos-framework/doc/delegation_pool.md @@ -124,6 +124,7 @@ transferred to A - [Resource `BeneficiaryForOperator`](#0x1_delegation_pool_BeneficiaryForOperator) - [Resource `NextCommissionPercentage`](#0x1_delegation_pool_NextCommissionPercentage) - [Resource `DelegationPoolAllowlisting`](#0x1_delegation_pool_DelegationPoolAllowlisting) +- [Enum `DelegationPermission`](#0x1_delegation_pool_DelegationPermission) - [Struct `AddStake`](#0x1_delegation_pool_AddStake) - [Struct `AddStakeEvent`](#0x1_delegation_pool_AddStakeEvent) - [Struct `ReactivateStake`](#0x1_delegation_pool_ReactivateStake) @@ -171,6 +172,10 @@ transferred to A - [Function `allowlisting_enabled`](#0x1_delegation_pool_allowlisting_enabled) - [Function `delegator_allowlisted`](#0x1_delegation_pool_delegator_allowlisted) - [Function `get_delegators_allowlist`](#0x1_delegation_pool_get_delegators_allowlist) +- [Function `check_delegation_pool_management_permission`](#0x1_delegation_pool_check_delegation_pool_management_permission) +- [Function `grant_delegation_pool_management_permission`](#0x1_delegation_pool_grant_delegation_pool_management_permission) +- [Function `check_stake_management_permission`](#0x1_delegation_pool_check_stake_management_permission) +- [Function `grant_stake_management_permission`](#0x1_delegation_pool_grant_stake_management_permission) - [Function `initialize_delegation_pool`](#0x1_delegation_pool_initialize_delegation_pool) - [Function `beneficiary_for_operator`](#0x1_delegation_pool_beneficiary_for_operator) - [Function `enable_partial_governance_voting`](#0x1_delegation_pool_enable_partial_governance_voting) @@ -245,6 +250,7 @@ transferred to A use 0x1::error; use 0x1::event; use 0x1::features; +use 0x1::permissioned_signer; use 0x1::pool_u64_unbound; use 0x1::signer; use 0x1::smart_table; @@ -678,6 +684,55 @@ evicted later by the pool owner.
+ + + + +## Enum `DelegationPermission` + + + +
enum DelegationPermission has copy, drop, store
+
+ + + +
+Variants + + +
+DelegationPoolManagementPermission + + +
+Fields + + +
+
+ + +
+ +
+ +
+StakeManagementPermission + + +
+Fields + + +
+
+ + +
+ +
+
@@ -1728,6 +1783,16 @@ Cannot unlock the accumulated active stake of NULL_SHAREHOLDER(0x0). + + +Use delegator voting flow instead. Delegation pools can no longer specify a single delegated voter. + + +
const ECAN_NO_LONGER_SET_DELEGATED_VOTER: u64 = 29;
+
+ + + Changing operator commission rate in delegation pool is not supported. @@ -1828,6 +1893,16 @@ There is not enough active stake on the stake pool to unlock< + + +Signer does not have permission to perform delegation logic. + + +
const ENO_DELEGATION_PERMISSION: u64 = 28;
+
+ + + Changing beneficiaries for operators is not supported. @@ -2756,6 +2831,109 @@ Return allowlist or revert if allowlisting is not enabled for the provided deleg + + + + +## Function `check_delegation_pool_management_permission` + +Permissions + + +
fun check_delegation_pool_management_permission(s: &signer)
+
+ + + +
+Implementation + + +
inline fun check_delegation_pool_management_permission(s: &signer) {
+    assert!(
+        permissioned_signer::check_permission_exists(s, DelegationPermission::DelegationPoolManagementPermission {}),
+        error::permission_denied(ENO_DELEGATION_PERMISSION),
+    );
+}
+
+ + + +
+ + + +## Function `grant_delegation_pool_management_permission` + + + +
public fun grant_delegation_pool_management_permission(master: &signer, permissioned_signer: &signer)
+
+ + + +
+Implementation + + +
public fun grant_delegation_pool_management_permission(master: &signer, permissioned_signer: &signer) {
+    permissioned_signer::authorize_unlimited(master, permissioned_signer, DelegationPermission::DelegationPoolManagementPermission {})
+}
+
+ + + +
+ + + +## Function `check_stake_management_permission` + + + +
fun check_stake_management_permission(s: &signer)
+
+ + + +
+Implementation + + +
inline fun check_stake_management_permission(s: &signer) {
+    assert!(
+        permissioned_signer::check_permission_exists(s, DelegationPermission::StakeManagementPermission {}),
+        error::permission_denied(ENO_DELEGATION_PERMISSION),
+    );
+}
+
+ + + +
+ + + +## Function `grant_stake_management_permission` + + + +
public fun grant_stake_management_permission(master: &signer, permissioned_signer: &signer)
+
+ + + +
+Implementation + + +
public fun grant_stake_management_permission(master: &signer, permissioned_signer: &signer) {
+    permissioned_signer::authorize_unlimited(master, permissioned_signer, DelegationPermission::StakeManagementPermission {})
+}
+
+ + +
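A brief hypothetical usage sketch of the two grant functions above: a master account hands a second signer only stake-management rights, so that signer can later call delegator entry points (add_stake, unlock, vote, ...) but not owner-level pool management. How the permissioned signer itself is obtained from the permissioned_signer module is assumed and not shown here.

```move
// Hypothetical sketch, not framework code: splitting rights between two signers.
module 0xcafe::delegation_permission_example {
    use aptos_framework::delegation_pool;

    // Grant only stake-management rights (add_stake, unlock, vote, ...).
    public fun grant_stake_rights(master: &signer, restricted: &signer) {
        delegation_pool::grant_stake_management_permission(master, restricted);
    }

    // Grant owner-level pool-management rights (set_operator, allowlisting, ...).
    public fun grant_pool_rights(master: &signer, restricted: &signer) {
        delegation_pool::grant_delegation_pool_management_permission(master, restricted);
    }
}
```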
@@ -2782,7 +2960,7 @@ Ownership over setting the operator/voter is granted to owner who h operator_commission_percentage: u64, delegation_pool_creation_seed: vector<u8>, ) acquires DelegationPool, GovernanceRecords, BeneficiaryForOperator, NextCommissionPercentage { - assert!(features::delegation_pools_enabled(), error::invalid_state(EDELEGATION_POOLS_DISABLED)); + check_delegation_pool_management_permission(owner); let owner_address = signer::address_of(owner); assert!(!owner_cap_exists(owner_address), error::already_exists(EOWNER_CAP_ALREADY_EXISTS)); assert!(operator_commission_percentage <= MAX_FEE, error::invalid_argument(EINVALID_COMMISSION_PERCENTAGE)); @@ -2823,10 +3001,7 @@ Ownership over setting the operator/voter is granted to owner who h move_to(owner, DelegationPoolOwnership { pool_address }); // All delegation pool enable partial governance voting by default once the feature flag is enabled. - if (features::partial_governance_voting_enabled( - ) && features::delegation_pool_partial_governance_voting_enabled()) { - enable_partial_governance_voting(pool_address); - } + enable_partial_governance_voting(pool_address); }
@@ -2884,18 +3059,13 @@ The existing voter will be replaced. The function is permissionless.
public entry fun enable_partial_governance_voting(
     pool_address: address,
 ) acquires DelegationPool, GovernanceRecords, BeneficiaryForOperator, NextCommissionPercentage {
-    assert!(features::partial_governance_voting_enabled(), error::invalid_state(EDISABLED_FUNCTION));
-    assert!(
-        features::delegation_pool_partial_governance_voting_enabled(),
-        error::invalid_state(EDISABLED_FUNCTION)
-    );
     assert_delegation_pool_exists(pool_address);
     // synchronize delegation and stake pools before any user operation.
     synchronize_delegation_pool(pool_address);
 
     let delegation_pool = borrow_global<DelegationPool>(pool_address);
     let stake_pool_signer = retrieve_stake_pool_owner(delegation_pool);
-    // delegated_voter is managed by the stake pool itself, which signer capability is managed by DelegationPool.
+    // delegated_voter is managed by the stake pool itself, whose signer capability is managed by DelegationPool.
     // So voting power of this stake pool can only be used through this module.
     stake::set_delegated_voter(&stake_pool_signer, signer::address_of(&stake_pool_signer));
 
@@ -2942,6 +3112,7 @@ Vote on a proposal with a voter's voting power. To successfully vote, the follow
     voting_power: u64,
     should_pass: bool
 ) acquires DelegationPool, GovernanceRecords, BeneficiaryForOperator, NextCommissionPercentage {
+    check_stake_management_permission(voter);
     assert_partial_governance_voting_enabled(pool_address);
     // synchronize delegation and stake pools before any user operation.
     synchronize_delegation_pool(pool_address);
@@ -2955,6 +3126,7 @@ Vote on a proposal with a voter's voting power. To successfully vote, the follow
     if (voting_power > remaining_voting_power) {
         voting_power = remaining_voting_power;
     };
+    aptos_governance::assert_proposal_expiration(pool_address, proposal_id);
     assert!(voting_power > 0, error::invalid_argument(ENO_VOTING_POWER));
 
     let governance_records = borrow_global_mut<GovernanceRecords>(pool_address);
@@ -2976,18 +3148,18 @@ Vote on a proposal with a voter's voting power. To successfully vote, the follow
                 should_pass,
             }
         );
+    } else {
+        event::emit_event(
+            &mut governance_records.vote_events,
+            VoteEvent {
+                voter: voter_address,
+                proposal_id,
+                delegation_pool: pool_address,
+                num_votes: voting_power,
+                should_pass,
+            }
+        );
     };
-
-    event::emit_event(
-        &mut governance_records.vote_events,
-        VoteEvent {
-            voter: voter_address,
-            proposal_id,
-            delegation_pool: pool_address,
-            num_votes: voting_power,
-            should_pass,
-        }
-    );
 }
 
@@ -3021,6 +3193,7 @@ voting power in THIS delegation pool must be not less than the minimum required metadata_hash: vector<u8>, is_multi_step_proposal: bool, ) acquires DelegationPool, GovernanceRecords, BeneficiaryForOperator, NextCommissionPercentage { + check_stake_management_permission(voter); assert_partial_governance_voting_enabled(pool_address); // synchronize delegation and stake pools before any user operation @@ -3053,16 +3226,16 @@ voting power in THIS delegation pool must be not less than the minimum required delegation_pool: pool_address, } ); + } else { + event::emit_event( + &mut governance_records.create_proposal_events, + CreateProposalEvent { + proposal_id, + voter: voter_addr, + delegation_pool: pool_address, + } + ); }; - - event::emit_event( - &mut governance_records.create_proposal_events, - CreateProposalEvent { - proposal_id, - voter: voter_addr, - delegation_pool: pool_address, - } - ); }
@@ -3793,6 +3966,7 @@ Allows an owner to change the operator of the underlying stake pool. owner: &signer, new_operator: address ) acquires DelegationPoolOwnership, DelegationPool, GovernanceRecords, BeneficiaryForOperator, NextCommissionPercentage { + check_delegation_pool_management_permission(owner); let pool_address = get_owned_pool_address(signer::address_of(owner)); // synchronize delegation and stake pools before any user operation // ensure the old operator is paid its uncommitted commission rewards @@ -3828,9 +4002,7 @@ one for each pool. operator: &signer, new_beneficiary: address ) acquires BeneficiaryForOperator { - assert!(features::operator_beneficiary_change_enabled(), std::error::invalid_state( - EOPERATOR_BENEFICIARY_CHANGE_NOT_SUPPORTED - )); + check_stake_management_permission(operator); // The beneficiay address of an operator is stored under the operator's address. // So, the operator does not need to be validated with respect to a staking pool. let operator_addr = signer::address_of(operator); @@ -3873,9 +4045,7 @@ Allows an owner to update the commission percentage for the operator of the unde owner: &signer, new_commission_percentage: u64 ) acquires DelegationPoolOwnership, DelegationPool, GovernanceRecords, BeneficiaryForOperator, NextCommissionPercentage { - assert!(features::commission_change_delegation_pool_enabled(), error::invalid_state( - ECOMMISSION_RATE_CHANGE_NOT_SUPPORTED - )); + check_delegation_pool_management_permission(owner); assert!(new_commission_percentage <= MAX_FEE, error::invalid_argument(EINVALID_COMMISSION_PERCENTAGE)); let owner_address = signer::address_of(owner); let pool_address = get_owned_pool_address(owner_address); @@ -3922,10 +4092,11 @@ Allows an owner to update the commission percentage for the operator of the unde ## Function `set_delegated_voter` -Allows an owner to change the delegated voter of the underlying stake pool. +Deprecated. Use the partial governance voting flow instead. -
public entry fun set_delegated_voter(owner: &signer, new_voter: address)
+
#[deprecated]
+public entry fun set_delegated_voter(_owner: &signer, _new_voter: address)
 
@@ -3935,18 +4106,10 @@ Allows an owner to change the delegated voter of the underlying stake pool.
public entry fun set_delegated_voter(
-    owner: &signer,
-    new_voter: address
-) acquires DelegationPoolOwnership, DelegationPool, GovernanceRecords, BeneficiaryForOperator, NextCommissionPercentage {
-    // No one can change delegated_voter once the partial governance voting feature is enabled.
-    assert!(
-        !features::delegation_pool_partial_governance_voting_enabled(),
-        error::invalid_state(EDEPRECATED_FUNCTION)
-    );
-    let pool_address = get_owned_pool_address(signer::address_of(owner));
-    // synchronize delegation and stake pools before any user operation
-    synchronize_delegation_pool(pool_address);
-    stake::set_delegated_voter(&retrieve_stake_pool_owner(borrow_global<DelegationPool>(pool_address)), new_voter);
+    _owner: &signer,
+    _new_voter: address
+) {
+    abort ECAN_NO_LONGER_SET_DELEGATED_VOTER
 }
 
@@ -3976,6 +4139,7 @@ this change won't take effects until the next lockup period. pool_address: address, new_voter: address ) acquires DelegationPool, GovernanceRecords, BeneficiaryForOperator, NextCommissionPercentage { + check_stake_management_permission(delegator); assert_partial_governance_voting_enabled(pool_address); // synchronize delegation and stake pools before any user operation @@ -4020,13 +4184,13 @@ this change won't take effects until the next lockup period. delegator: delegator_address, voter: new_voter, }) + } else { + event::emit_event(&mut governance_records.delegate_voting_power_events, DelegateVotingPowerEvent { + pool_address, + delegator: delegator_address, + voter: new_voter, + }); }; - - event::emit_event(&mut governance_records.delegate_voting_power_events, DelegateVotingPowerEvent { - pool_address, - delegator: delegator_address, - voter: new_voter, - }); }
@@ -4053,6 +4217,7 @@ Enable delegators allowlisting as the pool owner.
public entry fun enable_delegators_allowlisting(
     owner: &signer,
 ) acquires DelegationPoolOwnership, DelegationPool {
+    check_delegation_pool_management_permission(owner);
     assert!(
         features::delegation_pool_allowlisting_enabled(),
         error::invalid_state(EDELEGATORS_ALLOWLISTING_NOT_SUPPORTED)
@@ -4091,6 +4256,7 @@ Disable delegators allowlisting as the pool owner. The existing allowlist will b
 
public entry fun disable_delegators_allowlisting(
     owner: &signer,
 ) acquires DelegationPoolOwnership, DelegationPoolAllowlisting {
+    check_delegation_pool_management_permission(owner);
     let pool_address = get_owned_pool_address(signer::address_of(owner));
     assert_allowlisting_enabled(pool_address);
 
@@ -4126,6 +4292,7 @@ Allowlist a delegator as the pool owner.
     owner: &signer,
     delegator_address: address,
 ) acquires DelegationPoolOwnership, DelegationPoolAllowlisting {
+    check_delegation_pool_management_permission(owner);
     let pool_address = get_owned_pool_address(signer::address_of(owner));
     assert_allowlisting_enabled(pool_address);
 
@@ -4161,6 +4328,7 @@ Remove a delegator from the allowlist as the pool owner, but do not unlock their
     owner: &signer,
     delegator_address: address,
 ) acquires DelegationPoolOwnership, DelegationPoolAllowlisting {
+    check_delegation_pool_management_permission(owner);
     let pool_address = get_owned_pool_address(signer::address_of(owner));
     assert_allowlisting_enabled(pool_address);
 
@@ -4196,6 +4364,7 @@ Evict a delegator that is not allowlisted by unlocking their entire stake.
     owner: &signer,
     delegator_address: address,
 ) acquires DelegationPoolOwnership, DelegationPool, GovernanceRecords, BeneficiaryForOperator, NextCommissionPercentage, DelegationPoolAllowlisting {
+    check_delegation_pool_management_permission(owner);
     let pool_address = get_owned_pool_address(signer::address_of(owner));
     assert_allowlisting_enabled(pool_address);
     assert!(
@@ -4240,6 +4409,7 @@ Add amount of coins to the delegation pool pool_addressaddress,
     amount: u64
 ) acquires DelegationPool, GovernanceRecords, BeneficiaryForOperator, NextCommissionPercentage, DelegationPoolAllowlisting {
+    check_stake_management_permission(delegator);
     // short-circuit if amount to add is 0 so no event is emitted
     if (amount == 0) { return };
 
@@ -4277,17 +4447,17 @@ Add amount of coins to the delegation pool pool_addresselse {
+        event::emit_event(
+            &mut pool.add_stake_events,
+            AddStakeEvent {
+                pool_address,
+                delegator_address,
+                amount_added: amount,
+                add_stake_fee,
+            },
+        );
     };
-
-    event::emit_event(
-        &mut pool.add_stake_events,
-        AddStakeEvent {
-            pool_address,
-            delegator_address,
-            amount_added: amount,
-            add_stake_fee,
-        },
-    );
 }
 
@@ -4317,6 +4487,7 @@ at most how much active stake there is on the stake pool. pool_address: address, amount: u64 ) acquires DelegationPool, GovernanceRecords, BeneficiaryForOperator, NextCommissionPercentage { + check_stake_management_permission(delegator); // short-circuit if amount to unlock is 0 so no event is emitted if (amount == 0) { return }; @@ -4380,16 +4551,16 @@ at most how much active stake there is on the stake pool. amount_unlocked: amount, }, ); + } else { + event::emit_event( + &mut pool.unlock_stake_events, + UnlockStakeEvent { + pool_address, + delegator_address, + amount_unlocked: amount, + }, + ); }; - - event::emit_event( - &mut pool.unlock_stake_events, - UnlockStakeEvent { - pool_address, - delegator_address, - amount_unlocked: amount, - }, - ); }
@@ -4418,6 +4589,7 @@ Move amount of coins from pending_inactive to active. pool_address: address, amount: u64 ) acquires DelegationPool, GovernanceRecords, BeneficiaryForOperator, NextCommissionPercentage, DelegationPoolAllowlisting { + check_stake_management_permission(delegator); // short-circuit if amount to reactivate is 0 so no event is emitted if (amount == 0) { return }; @@ -4450,16 +4622,16 @@ Move amount of coins from pending_inactive to active. amount_reactivated: amount, }, ); + } else { + event::emit_event( + &mut pool.reactivate_stake_events, + ReactivateStakeEvent { + pool_address, + delegator_address, + amount_reactivated: amount, + }, + ); }; - - event::emit_event( - &mut pool.reactivate_stake_events, - ReactivateStakeEvent { - pool_address, - delegator_address, - amount_reactivated: amount, - }, - ); }
@@ -4488,6 +4660,7 @@ Withdraw amount of owned inactive stake from the delegation pool at pool_address: address, amount: u64 ) acquires DelegationPool, GovernanceRecords, BeneficiaryForOperator, NextCommissionPercentage { + check_stake_management_permission(delegator); assert!(amount > 0, error::invalid_argument(EWITHDRAW_ZERO_STAKE)); // synchronize delegation and stake pools before any user operation synchronize_delegation_pool(pool_address); @@ -4575,16 +4748,16 @@ Withdraw amount of owned inactive stake from the delegation pool at amount_withdrawn: amount, }, ); + } else { + event::emit_event( + &mut pool.withdraw_stake_events, + WithdrawStakeEvent { + pool_address, + delegator_address, + amount_withdrawn: amount, + }, + ); }; - - event::emit_event( - &mut pool.withdraw_stake_events, - WithdrawStakeEvent { - pool_address, - delegator_address, - amount_withdrawn: amount, - }, - ); }
@@ -5095,15 +5268,13 @@ shares pools, assign commission to operator and eventually prepare delegation po }, ); - if (features::operator_beneficiary_change_enabled()) { - emit(DistributeCommission { - pool_address, - operator: stake::get_operator(pool_address), - beneficiary: beneficiary_for_operator(stake::get_operator(pool_address)), - commission_active, - commission_pending_inactive, - }) - }; + emit(DistributeCommission { + pool_address, + operator: stake::get_operator(pool_address), + beneficiary: beneficiary_for_operator(stake::get_operator(pool_address)), + commission_active, + commission_pending_inactive, + }); // advance lockup cycle on delegation pool if already ended on stake pool (AND stake explicitly inactivated) if (lockup_cycle_ended) { diff --git a/aptos-move/framework/aptos-framework/doc/dispatchable_fungible_asset.md b/aptos-move/framework/aptos-framework/doc/dispatchable_fungible_asset.md index bb6d62d42743e..7dc2ce54d7ac3 100644 --- a/aptos-move/framework/aptos-framework/doc/dispatchable_fungible_asset.md +++ b/aptos-move/framework/aptos-framework/doc/dispatchable_fungible_asset.md @@ -23,19 +23,24 @@ See AIP-73 for further discussion - [Resource `TransferRefStore`](#0x1_dispatchable_fungible_asset_TransferRefStore) - [Constants](#@Constants_0) - [Function `register_dispatch_functions`](#0x1_dispatchable_fungible_asset_register_dispatch_functions) +- [Function `register_derive_supply_dispatch_function`](#0x1_dispatchable_fungible_asset_register_derive_supply_dispatch_function) - [Function `withdraw`](#0x1_dispatchable_fungible_asset_withdraw) - [Function `deposit`](#0x1_dispatchable_fungible_asset_deposit) - [Function `transfer`](#0x1_dispatchable_fungible_asset_transfer) - [Function `transfer_assert_minimum_deposit`](#0x1_dispatchable_fungible_asset_transfer_assert_minimum_deposit) - [Function `derived_balance`](#0x1_dispatchable_fungible_asset_derived_balance) +- [Function `is_derived_balance_at_least`](#0x1_dispatchable_fungible_asset_is_derived_balance_at_least) +- [Function `derived_supply`](#0x1_dispatchable_fungible_asset_derived_supply) - [Function `borrow_transfer_ref`](#0x1_dispatchable_fungible_asset_borrow_transfer_ref) - [Function `dispatchable_withdraw`](#0x1_dispatchable_fungible_asset_dispatchable_withdraw) - [Function `dispatchable_deposit`](#0x1_dispatchable_fungible_asset_dispatchable_deposit) - [Function `dispatchable_derived_balance`](#0x1_dispatchable_fungible_asset_dispatchable_derived_balance) +- [Function `dispatchable_derived_supply`](#0x1_dispatchable_fungible_asset_dispatchable_derived_supply) - [Specification](#@Specification_1) - [Function `dispatchable_withdraw`](#@Specification_1_dispatchable_withdraw) - [Function `dispatchable_deposit`](#@Specification_1_dispatchable_deposit) - [Function `dispatchable_derived_balance`](#@Specification_1_dispatchable_derived_balance) + - [Function `dispatchable_derived_supply`](#@Specification_1_dispatchable_derived_supply)
use 0x1::error;
@@ -160,6 +165,36 @@ TransferRefStore doesn't exist on the fungible asset type.
 
 
 
+
+
+
+
+## Function `register_derive_supply_dispatch_function`
+
+
+
+
public fun register_derive_supply_dispatch_function(constructor_ref: &object::ConstructorRef, dispatch_function: option::Option<function_info::FunctionInfo>)
+
+ + + +
+Implementation + + +
public fun register_derive_supply_dispatch_function(
+    constructor_ref: &ConstructorRef,
+    dispatch_function: Option<FunctionInfo>
+) {
+    fungible_asset::register_derive_supply_dispatch_function(
+        constructor_ref,
+        dispatch_function
+    );
+}
+
+ + +
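A hypothetical registration sketch for the function above. The module name, address, and the way the ConstructorRef is obtained are assumptions, and the hook function itself (which must match the dispatch signature checked at registration time) is omitted:

```move
// Hypothetical sketch, not framework code: wiring up a derived-supply hook.
module 0xcafe::supply_hook_example {
    use std::option;
    use std::string;
    use aptos_framework::dispatchable_fungible_asset;
    use aptos_framework::function_info;
    use aptos_framework::object::ConstructorRef;

    // `constructor_ref` is assumed to come from the asset creation flow, and a
    // `derived_supply` function with the expected signature is assumed to live
    // in this module at the signer's address.
    public fun register_hook(module_signer: &signer, constructor_ref: &ConstructorRef) {
        let hook = function_info::new_function_info(
            module_signer,
            string::utf8(b"supply_hook_example"),
            string::utf8(b"derived_supply"),
        );
        dispatchable_fungible_asset::register_derive_supply_dispatch_function(
            constructor_ref,
            option::some(hook),
        );
    }
}
```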
@@ -186,13 +221,13 @@ The semantics of deposit will be governed by the function specified in DispatchF amount: u64, ): FungibleAsset acquires TransferRefStore { fungible_asset::withdraw_sanity_check(owner, store, false); + fungible_asset::withdraw_permission_check(owner, store, amount); let func_opt = fungible_asset::withdraw_dispatch_function(store); if (option::is_some(&func_opt)) { assert!( features::dispatchable_fungible_asset_enabled(), error::aborted(ENOT_ACTIVATED) ); - let start_balance = fungible_asset::balance(store); let func = option::borrow(&func_opt); function_info::load_module_from_function(func); let fa = dispatchable_withdraw( @@ -201,11 +236,9 @@ The semantics of deposit will be governed by the function specified in DispatchF borrow_transfer_ref(store), func, ); - let end_balance = fungible_asset::balance(store); - assert!(amount <= start_balance - end_balance, error::aborted(EAMOUNT_MISMATCH)); fa } else { - fungible_asset::withdraw_internal(object::object_address(&store), amount) + fungible_asset::unchecked_withdraw(object::object_address(&store), amount) } }
@@ -249,7 +282,7 @@ The semantics of deposit will be governed by the function specified in DispatchF func ) } else { - fungible_asset::deposit_internal(object::object_address(&store), fa) + fungible_asset::unchecked_deposit(object::object_address(&store), fa) } }
@@ -364,6 +397,84 @@ The semantics of value will be governed by the function specified in DispatchFun + + + + +## Function `is_derived_balance_at_least` + +Whether the derived value of store using the overloaded hook is at least amount + +The semantics of value will be governed by the function specified in DispatchFunctionStore. + + +
#[view]
+public fun is_derived_balance_at_least<T: key>(store: object::Object<T>, amount: u64): bool
+
+ + + +
+Implementation + + +
public fun is_derived_balance_at_least<T: key>(store: Object<T>, amount: u64): bool {
+    let func_opt = fungible_asset::derived_balance_dispatch_function(store);
+    if (option::is_some(&func_opt)) {
+        assert!(
+            features::dispatchable_fungible_asset_enabled(),
+            error::aborted(ENOT_ACTIVATED)
+        );
+        let func = option::borrow(&func_opt);
+        function_info::load_module_from_function(func);
+        dispatchable_derived_balance(store, func) >= amount
+    } else {
+        fungible_asset::is_balance_at_least(store, amount)
+    }
+}
+
+ + + +
+ + + +## Function `derived_supply` + +Get the derived supply of the fungible asset using the overloaded hook. + +The semantics of supply will be governed by the function specified in DeriveSupplyDispatch. + + +
#[view]
+public fun derived_supply<T: key>(metadata: object::Object<T>): option::Option<u128>
+
+ + + +
+Implementation + + +
public fun derived_supply<T: key>(metadata: Object<T>): Option<u128> {
+    let func_opt = fungible_asset::derived_supply_dispatch_function(metadata);
+    if (option::is_some(&func_opt)) {
+        assert!(
+            features::dispatchable_fungible_asset_enabled(),
+            error::aborted(ENOT_ACTIVATED)
+        );
+        let func = option::borrow(&func_opt);
+        function_info::load_module_from_function(func);
+        dispatchable_derived_supply(metadata, func)
+    } else {
+        fungible_asset::supply(metadata)
+    }
+}
+
+ + +
@@ -474,6 +585,31 @@ The semantics of value will be governed by the function specified in DispatchFun + + + + +## Function `dispatchable_derived_supply` + + + +
fun dispatchable_derived_supply<T: key>(metadata: object::Object<T>, function: &function_info::FunctionInfo): option::Option<u128>
+
+ + + +
+Implementation + + +
native fun dispatchable_derived_supply<T: key>(
+    metadata: Object<T>,
+    function: &FunctionInfo,
+): Option<u128>;
+
+ + +
@@ -530,6 +666,22 @@ The semantics of value will be governed by the function specified in DispatchFun +
pragma opaque;
+
+ + + + + +### Function `dispatchable_derived_supply` + + +
fun dispatchable_derived_supply<T: key>(metadata: object::Object<T>, function: &function_info::FunctionInfo): option::Option<u128>
+
+ + + +
pragma opaque;
 
diff --git a/aptos-move/framework/aptos-framework/doc/domain_account_abstraction_ed25519_hex.md b/aptos-move/framework/aptos-framework/doc/domain_account_abstraction_ed25519_hex.md new file mode 100644 index 0000000000000..4e3df6b353a88 --- /dev/null +++ b/aptos-move/framework/aptos-framework/doc/domain_account_abstraction_ed25519_hex.md @@ -0,0 +1,80 @@ + + + +# Module `0x1::DERIVABLE_ACCOUNT_ABSTRACTION_ed25519_hex` + +Domain account abstraction using ed25519 hex for signing. + +Authentication takes digest, converts to hex (prefixed with 0x, with lowercase letters), +and then expects that to be signed. +authenticator is expected to be signature: vector +account_identity is raw public_key. + + +- [Constants](#@Constants_0) +- [Function `authenticate`](#0x1_DERIVABLE_ACCOUNT_ABSTRACTION_ed25519_hex_authenticate) + + +
use 0x1::auth_data;
+use 0x1::ed25519;
+use 0x1::error;
+use 0x1::string;
+use 0x1::string_utils;
+
+ + + + + +## Constants + + + + + + +
const EINVALID_SIGNATURE: u64 = 1;
+
+ + + + + +## Function `authenticate` + +Authorization function for domain account abstraction. + + +
public fun authenticate(account: signer, aa_auth_data: auth_data::AbstractionAuthData): signer
+
+ + + +
+Implementation + + +
public fun authenticate(account: signer, aa_auth_data: AbstractionAuthData): signer {
+    let hex_digest = string_utils::to_string(aa_auth_data.digest());
+
+    let public_key = new_unvalidated_public_key_from_bytes(*aa_auth_data.domain_account_identity());
+    let signature = new_signature_from_bytes(*aa_auth_data.domain_authenticator());
+    assert!(
+        ed25519::signature_verify_strict(
+            &signature,
+            &public_key,
+            *hex_digest.bytes(),
+        ),
+        error::permission_denied(EINVALID_SIGNATURE)
+    );
+
+    account
+}
+
+ + + +
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/aptos-framework/doc/ethereum.md b/aptos-move/framework/aptos-framework/doc/ethereum.md deleted file mode 100644 index 0c67a4ce65fed..0000000000000 --- a/aptos-move/framework/aptos-framework/doc/ethereum.md +++ /dev/null @@ -1,421 +0,0 @@ - - - -# Module `0x1::ethereum` - - - -- [Struct `EthereumAddress`](#0x1_ethereum_EthereumAddress) -- [Constants](#@Constants_0) -- [Function `ethereum_address`](#0x1_ethereum_ethereum_address) -- [Function `ethereum_address_no_eip55`](#0x1_ethereum_ethereum_address_no_eip55) -- [Function `ethereum_address_20_bytes`](#0x1_ethereum_ethereum_address_20_bytes) -- [Function `get_inner_ethereum_address`](#0x1_ethereum_get_inner_ethereum_address) -- [Function `to_lowercase`](#0x1_ethereum_to_lowercase) -- [Function `to_eip55_checksumed_address`](#0x1_ethereum_to_eip55_checksumed_address) -- [Function `get_inner`](#0x1_ethereum_get_inner) -- [Function `assert_eip55`](#0x1_ethereum_assert_eip55) -- [Function `assert_40_char_hex`](#0x1_ethereum_assert_40_char_hex) - - -
use 0x1::aptos_hash;
-
- - - - - -## Struct `EthereumAddress` - -Represents an Ethereum address within Aptos smart contracts. -Provides structured handling, storage, and validation of Ethereum addresses. - - -
struct EthereumAddress has copy, drop, store
-
- - - -
-Fields - - -
-
-inner: vector<u8> -
-
- -
-
- - -
- - - -## Constants - - - - -Constants for ASCII character codes - - -
const ASCII_A: u8 = 65;
-
- - - - - - - -
const ASCII_A_LOWERCASE: u8 = 97;
-
- - - - - - - -
const ASCII_F_LOWERCASE: u8 = 102;
-
- - - - - - - -
const ASCII_Z: u8 = 90;
-
- - - - - - - -
const EINVALID_LENGTH: u64 = 1;
-
- - - - - -## Function `ethereum_address` - -Validates an Ethereum address against EIP-55 checksum rules and returns a new EthereumAddress. - -@param ethereum_address A 40-byte vector of unsigned 8-bit integers (hexadecimal format). -@return A validated EthereumAddress struct. -@abort If the address does not conform to EIP-55 standards. - - -
public fun ethereum_address(ethereum_address: vector<u8>): ethereum::EthereumAddress
-
- - - -
-Implementation - - -
public fun ethereum_address(ethereum_address: vector<u8>): EthereumAddress {
-    assert_eip55(ðereum_address);
-    EthereumAddress { inner: ethereum_address }
-}
-
- - - -
- - - -## Function `ethereum_address_no_eip55` - -Returns a new EthereumAddress without EIP-55 validation. - -@param ethereum_address A 40-byte vector of unsigned 8-bit integers (hexadecimal format). -@return A validated EthereumAddress struct. -@abort If the address does not conform to EIP-55 standards. - - -
public fun ethereum_address_no_eip55(ethereum_address: vector<u8>): ethereum::EthereumAddress
-
- - - -
-Implementation - - -
public fun ethereum_address_no_eip55(ethereum_address: vector<u8>): EthereumAddress {
-    assert_40_char_hex(ðereum_address);
-    EthereumAddress { inner: ethereum_address }
-}
-
- - - -
- - - -## Function `ethereum_address_20_bytes` - -Returns a new 20-byte EthereumAddress without EIP-55 validation. - -@param ethereum_address A 20-byte vector of unsigned 8-bit bytes. -@return An EthereumAddress struct. -@abort If the address does not conform to EIP-55 standards. - - -
public fun ethereum_address_20_bytes(ethereum_address: vector<u8>): ethereum::EthereumAddress
-
- - - -
-Implementation - - -
public fun ethereum_address_20_bytes(ethereum_address: vector<u8>): EthereumAddress {
-    assert!(vector::length(ðereum_address) == 20, EINVALID_LENGTH);
-    EthereumAddress { inner: ethereum_address }
-}
-
- - - -
- - - -## Function `get_inner_ethereum_address` - -Gets the inner vector of an EthereumAddress. - -@param ethereum_address A 40-byte vector of unsigned 8-bit integers (hexadecimal format). -@return The vector inner value of the EthereumAddress - - -
public fun get_inner_ethereum_address(ethereum_address: ethereum::EthereumAddress): vector<u8>
-
- - - -
-Implementation - - -
public fun get_inner_ethereum_address(ethereum_address: EthereumAddress): vector<u8> {
-    ethereum_address.inner
-}
-
- - - -
- - - -## Function `to_lowercase` - -Converts uppercase ASCII characters in a vector to their lowercase equivalents. - -@param input A reference to a vector of ASCII characters. -@return A new vector with lowercase equivalents of the input characters. -@note Only affects ASCII letters; non-alphabetic characters are unchanged. - - -
public fun to_lowercase(input: &vector<u8>): vector<u8>
-
- - - -
-Implementation - - -
public fun to_lowercase(input: &vector<u8>): vector<u8> {
-    let lowercase_bytes = vector::empty();
-    vector::enumerate_ref(input, |_i, element| {
-        let lower_byte = if (*element >= ASCII_A && *element <= ASCII_Z) {
-            *element + 32
-        } else {
-            *element
-        };
-        vector::push_back<u8>(&mut lowercase_bytes, lower_byte);
-    });
-    lowercase_bytes
-}
-
- - - -
- - - -## Function `to_eip55_checksumed_address` - -Converts an Ethereum address to EIP-55 checksummed format. - -@param ethereum_address A 40-character vector representing the Ethereum address in hexadecimal format. -@return The EIP-55 checksummed version of the input address. -@abort If the input address does not have exactly 40 characters. -@note Assumes input address is valid and in lowercase hexadecimal format. - - -
public fun to_eip55_checksumed_address(ethereum_address: &vector<u8>): vector<u8>
-
- - - -
-Implementation - - -
public fun to_eip55_checksumed_address(ethereum_address: &vector<u8>): vector<u8> {
-    assert!(vector::length(ethereum_address) == 40, 0);
-    let lowercase = to_lowercase(ethereum_address);
-    let hash = keccak256(lowercase);
-    let output = vector::empty<u8>();
-
-    for (index in 0..40) {
-        let item = *vector::borrow(ethereum_address, index);
-        if (item >= ASCII_A_LOWERCASE && item <= ASCII_F_LOWERCASE) {
-            let hash_item = *vector::borrow(&hash, index / 2);
-            if ((hash_item >> ((4 * (1 - (index % 2))) as u8)) & 0xF >= 8) {
-                vector::push_back(&mut output, item - 32);
-            } else {
-                vector::push_back(&mut output, item);
-            }
-        } else {
-            vector::push_back(&mut output, item);
-        }
-    };
-    output
-}
-
- - - -
- - - -## Function `get_inner` - - - -
public fun get_inner(eth_address: &ethereum::EthereumAddress): vector<u8>
-
- - - -
-Implementation - - -
public fun get_inner(eth_address: &EthereumAddress): vector<u8> {
-    eth_address.inner
-}
-
- - - -
- - - -## Function `assert_eip55` - -Checks if an Ethereum address conforms to the EIP-55 checksum standard. - -@param ethereum_address A reference to a 40-character vector of an Ethereum address in hexadecimal format. -@abort If the address does not match its EIP-55 checksummed version. -@note Assumes the address is correctly formatted as a 40-character hexadecimal string. - - -
public fun assert_eip55(ethereum_address: &vector<u8>)
-
- - - -
-Implementation - - -
public fun assert_eip55(ethereum_address: &vector<u8>) {
-    let eip55 = to_eip55_checksumed_address(ethereum_address);
-    let len = vector::length(&eip55);
-    for (index in 0..len) {
-        assert!(vector::borrow(&eip55, index) == vector::borrow(ethereum_address, index), 0);
-    };
-}
-
- - - -
- - - -## Function `assert_40_char_hex` - -Checks if an Ethereum address is a nonzero 40-character hexadecimal string. - -@param ethereum_address A reference to a vector of bytes representing the Ethereum address as characters. -@abort If the address is not 40 characters long, contains invalid characters, or is all zeros. - - -
public fun assert_40_char_hex(ethereum_address: &vector<u8>)
-
- - - -
-Implementation - - -
public fun assert_40_char_hex(ethereum_address: &vector<u8>) {
-    let len = vector::length(ethereum_address);
-
-    // Ensure the address is exactly 40 characters long
-    assert!(len == 40, 1);
-
-    // Ensure the address contains only valid hexadecimal characters
-    let is_zero = true;
-    for (index in 0..len) {
-        let char = *vector::borrow(ethereum_address, index);
-
-        // Check if the character is a valid hexadecimal character (0-9, a-f, A-F)
-        assert!(
-            (char >= 0x30 && char <= 0x39) || // '0' to '9'
-            (char >= 0x41 && char <= 0x46) || // 'A' to 'F'
-            (char >= 0x61 && char <= 0x66),  // 'a' to 'f'
-            2
-        );
-
-        // Check if the address is nonzero
-        if (char != 0x30) { // '0'
-            is_zero = false;
-        };
-    };
-
-    // Abort if the address is all zeros
-    assert!(!is_zero, 3);
-}
-
- - - -
- - -[move-book]: https://aptos.dev/move/book/SUMMARY
diff --git a/aptos-move/framework/aptos-framework/doc/ethereum_derivable_account.md b/aptos-move/framework/aptos-framework/doc/ethereum_derivable_account.md
new file mode 100644
index 0000000000000..29dbe88940303
--- /dev/null
+++ b/aptos-move/framework/aptos-framework/doc/ethereum_derivable_account.md
@@ -0,0 +1,491 @@
+# Module `0x1::ethereum_derivable_account`
+
+Derivable account abstraction that verifies a message signed by
+SIWE.
+1. The message format is as follows:
+
+<domain> wants you to sign in with your Ethereum account:
+<ethereum_address>
+
+Please confirm you explicitly initiated this request from <domain>. You are approving to execute transaction <entry_function_name> on Aptos blockchain (<network_name>).
+
+URI: <scheme>://<domain>
+Version: 1
+Chain ID: <chain_id>
+Nonce: <digest>
+Issued At: <issued_at>
+
+2. The abstract public key is a BCS serialized SIWEAbstractPublicKey.
+3. The abstract signature is a BCS serialized SIWEAbstractSignature.
+4. This module has been tested for the following wallets:
+- Metamask
+- Phantom
+- Coinbase
+- OKX
+- Exodus
+- Backpack
+
+
+- [Enum `SIWEAbstractSignature`](#0x1_ethereum_derivable_account_SIWEAbstractSignature)
+- [Struct `SIWEAbstractPublicKey`](#0x1_ethereum_derivable_account_SIWEAbstractPublicKey)
+- [Constants](#@Constants_0)
+- [Function `deserialize_abstract_public_key`](#0x1_ethereum_derivable_account_deserialize_abstract_public_key)
+- [Function `deserialize_abstract_signature`](#0x1_ethereum_derivable_account_deserialize_abstract_signature)
+- [Function `construct_message`](#0x1_ethereum_derivable_account_construct_message)
+- [Function `recover_public_key`](#0x1_ethereum_derivable_account_recover_public_key)
+- [Function `authenticate_auth_data`](#0x1_ethereum_derivable_account_authenticate_auth_data)
+- [Function `authenticate`](#0x1_ethereum_derivable_account_authenticate)
+
+
use 0x1::aptos_hash;
+use 0x1::auth_data;
+use 0x1::base16;
+use 0x1::bcs_stream;
+use 0x1::chain_id;
+use 0x1::common_account_abstractions_utils;
+use 0x1::option;
+use 0x1::secp256k1;
+use 0x1::string;
+use 0x1::string_utils;
+use 0x1::transaction_context;
+use 0x1::vector;
+
+ + + + + +## Enum `SIWEAbstractSignature` + + + +
enum SIWEAbstractSignature has drop
+
+ + + +
+Variants + + +
+MessageV1 + + +
+Fields + + +
+
+issued_at: string::String +
+
+ The date and time when the signature was issued +
+
+signature: vector<u8> +
+
+ The signature of the message +
+
+ + +
+ +
+ +
+MessageV2 + + +
+Fields + + +
+
+scheme: string::String +
+
+ The scheme in the URI of the message, e.g. the scheme of the website that requested the signature (http, https, etc.) +
+
+issued_at: string::String +
+
+ The date and time when the signature was issued +
+
+signature: vector<u8> +
+
+ The signature of the message +
+
+ + +
+ +
+ +
+ + + +## Struct `SIWEAbstractPublicKey` + + + +
struct SIWEAbstractPublicKey has drop
+
+ + + +
+Fields + + +
+
+ethereum_address: vector<u8> +
+
+ +
+
+domain: vector<u8> +
+
+ +
+
+ + +
+ + + +## Constants + + + + +Address mismatch. + + +
const EADDR_MISMATCH: u64 = 4;
+
+ + + + + +Signature failed to verify. + + +
const EINVALID_SIGNATURE: u64 = 1;
+
+ + + + + +Invalid signature type. + + +
const EINVALID_SIGNATURE_TYPE: u64 = 3;
+
+ + + + + +Entry function payload is missing. + + +
const EMISSING_ENTRY_FUNCTION_PAYLOAD: u64 = 2;
+
+ + + + + +Unexpected v value. + + +
const EUNEXPECTED_V: u64 = 5;
+
+ + + + + +## Function `deserialize_abstract_public_key` + +Deserializes the abstract public key which is supposed to be a bcs +serialized SIWEAbstractPublicKey. + + +
fun deserialize_abstract_public_key(abstract_public_key: &vector<u8>): ethereum_derivable_account::SIWEAbstractPublicKey
+
+ + + +
+Implementation + + +
fun deserialize_abstract_public_key(abstract_public_key: &vector<u8>): SIWEAbstractPublicKey {
+    let stream = bcs_stream::new(*abstract_public_key);
+    let ethereum_address = bcs_stream::deserialize_vector<u8>(&mut stream, |x| deserialize_u8(x));
+    let domain = bcs_stream::deserialize_vector<u8>(&mut stream, |x| deserialize_u8(x));
+    SIWEAbstractPublicKey { ethereum_address, domain }
+}
+
+ + + +
+ + + +## Function `deserialize_abstract_signature` + +Returns a tuple of the signature type and the signature. +We include the issued_at in the signature as it is a required field in the SIWE standard. + + +
fun deserialize_abstract_signature(abstract_signature: &vector<u8>): ethereum_derivable_account::SIWEAbstractSignature
+
+ + + +
+Implementation + + +
fun deserialize_abstract_signature(abstract_signature: &vector<u8>): SIWEAbstractSignature {
+    let stream = bcs_stream::new(*abstract_signature);
+    let signature_type = bcs_stream::deserialize_u8(&mut stream);
+    if (signature_type == 0x00) {
+        let issued_at = bcs_stream::deserialize_vector<u8>(&mut stream, |x| deserialize_u8(x));
+        let signature = bcs_stream::deserialize_vector<u8>(&mut stream, |x| deserialize_u8(x));
+        SIWEAbstractSignature::MessageV1 { issued_at: string::utf8(issued_at), signature }
+    } else if (signature_type == 0x01) {
+        let scheme = bcs_stream::deserialize_vector<u8>(&mut stream, |x| deserialize_u8(x));
+        let issued_at = bcs_stream::deserialize_vector<u8>(&mut stream, |x| deserialize_u8(x));
+        let signature = bcs_stream::deserialize_vector<u8>(&mut stream, |x| deserialize_u8(x));
+        SIWEAbstractSignature::MessageV2 { scheme: string::utf8(scheme), issued_at: string::utf8(issued_at), signature }
+    } else {
+        abort(EINVALID_SIGNATURE_TYPE)
+    }
+}
+
+ + + +
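For clients producing the abstract signature, the deserializer above implies the following byte layout. This is a hypothetical illustration (made-up module and values) that relies only on BCS encoding a `vector<u8>` as a ULEB128 length followed by the bytes:

```move
// Hypothetical sketch, not framework code: encoding the MessageV2 variant.
module 0xcafe::siwe_abstract_signature_example {
    use std::bcs;

    // Layout: tag byte 0x01 (0x00 would select MessageV1), then scheme,
    // issued_at and signature, each as a BCS-encoded vector<u8>.
    public fun encode_message_v2(
        scheme: vector<u8>,      // e.g. b"https"
        issued_at: vector<u8>,   // e.g. b"2025-01-01T00:00:00.000Z"
        signature: vector<u8>,   // 65-byte r || s || v secp256k1 signature
    ): vector<u8> {
        let out = &mut vector[];
        out.push_back(0x01);
        out.append(bcs::to_bytes(&scheme));
        out.append(bcs::to_bytes(&issued_at));
        out.append(bcs::to_bytes(&signature));
        *out
    }
}
```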
+ + + +## Function `construct_message` + + + +
fun construct_message(ethereum_address: &vector<u8>, domain: &vector<u8>, entry_function_name: &vector<u8>, digest_utf8: &vector<u8>, issued_at: &vector<u8>, scheme: &vector<u8>): vector<u8>
+
+ + + +
+Implementation + + +
fun construct_message(
+    ethereum_address: &vector<u8>,
+    domain: &vector<u8>,
+    entry_function_name: &vector<u8>,
+    digest_utf8: &vector<u8>,
+    issued_at: &vector<u8>,
+    scheme: &vector<u8>,
+): vector<u8> {
+    let message = &mut vector[];
+    message.append(*domain);
+    message.append(b" wants you to sign in with your Ethereum account:\n");
+    message.append(*ethereum_address);
+    message.append(b"\n\nPlease confirm you explicitly initiated this request from ");
+    message.append(*domain);
+    message.append(b".");
+    message.append(b" You are approving to execute transaction ");
+    message.append(*entry_function_name);
+    message.append(b" on Aptos blockchain");
+    let network_name = network_name();
+    message.append(b" (");
+    message.append(network_name);
+    message.append(b")");
+    message.append(b".");
+    message.append(b"\n\nURI: ");
+    message.append(*scheme);
+    message.append(b"://");
+    message.append(*domain);
+    message.append(b"\nVersion: 1");
+    message.append(b"\nChain ID: ");
+    message.append(*string_utils::to_string(&chain_id::get()).bytes());
+    message.append(b"\nNonce: ");
+    message.append(*digest_utf8);
+    message.append(b"\nIssued At: ");
+    message.append(*issued_at);
+
+    let msg_len = vector::length(message);
+
+    let prefix = b"\x19Ethereum Signed Message:\n";
+    let msg_len_string = string_utils::to_string(&msg_len); // returns string
+    let msg_len_bytes = msg_len_string.bytes(); // vector<u8>
+
+    let full_message = &mut vector[];
+    full_message.append(prefix);
+    full_message.append(*msg_len_bytes);
+    full_message.append(*message);
+
+    *full_message
+}
+
+ + + +
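A worked rendering of the message built above, with made-up values: domain example.com, an abbreviated Ethereum address and digest, entry function 0x1::coin::transfer, chain ID 1, an illustrative timestamp, and assuming the network name resolves to mainnet. The keccak256 input is this text prefixed with "\x19Ethereum Signed Message:\n" and the message length.

```
example.com wants you to sign in with your Ethereum account:
0xAbCd...1234

Please confirm you explicitly initiated this request from example.com. You are approving to execute transaction 0x1::coin::transfer on Aptos blockchain (mainnet).

URI: https://example.com
Version: 1
Chain ID: 1
Nonce: 0x3a0f...9bde
Issued At: 2025-01-01T00:00:00.000Z
```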
+ + + +## Function `recover_public_key` + + + +
fun recover_public_key(signature_bytes: &vector<u8>, message: &vector<u8>): vector<u8>
+
+ + + +
+Implementation + + +
fun recover_public_key(signature_bytes: &vector<u8>, message: &vector<u8>): vector<u8> {
+    let rs = vector::slice(signature_bytes, 0, 64);
+    let v = *vector::borrow(signature_bytes, 64);
+    assert!(v == 27 || v == 28, EUNEXPECTED_V);
+    let signature = secp256k1::ecdsa_signature_from_bytes(rs);
+
+    let maybe_recovered = secp256k1::ecdsa_recover(*message, v - 27, &signature);
+
+    assert!(
+        option::is_some(&maybe_recovered),
+        EINVALID_SIGNATURE
+    );
+
+    let pubkey = option::borrow(&maybe_recovered);
+
+    let pubkey_bytes = secp256k1::ecdsa_raw_public_key_to_bytes(pubkey);
+
+    // Add 0x04 prefix to the public key, to match the
+    // full uncompressed format from ethers.js
+    let full_pubkey = &mut vector[];
+    vector::push_back(full_pubkey, 4u8);
+    vector::append(full_pubkey, pubkey_bytes);
+
+    *full_pubkey
+}
+
+ + + +
+ + + +## Function `authenticate_auth_data` + + + +
fun authenticate_auth_data(aa_auth_data: auth_data::AbstractionAuthData, entry_function_name: &vector<u8>)
+
+ + + +
+Implementation + + +
fun authenticate_auth_data(
+    aa_auth_data: AbstractionAuthData,
+    entry_function_name: &vector<u8>
+) {
+    let derivable_abstract_public_key = aa_auth_data.derivable_abstract_public_key();
+    let abstract_public_key = deserialize_abstract_public_key(derivable_abstract_public_key);
+    let digest_utf8 = string_utils::to_string(aa_auth_data.digest()).bytes();
+    let abstract_signature = deserialize_abstract_signature(aa_auth_data.derivable_abstract_signature());
+    let issued_at = abstract_signature.issued_at.bytes();
+    let scheme = abstract_signature.scheme.bytes();
+    let message = construct_message(&abstract_public_key.ethereum_address, &abstract_public_key.domain, entry_function_name, digest_utf8, issued_at, scheme);
+    let hashed_message = aptos_hash::keccak256(message);
+    let public_key_bytes = recover_public_key(&abstract_signature.signature, &hashed_message);
+
+    // 1. Skip the 0x04 prefix (take the bytes after the first byte)
+    let public_key_without_prefix = vector::slice(&public_key_bytes, 1, vector::length(&public_key_bytes));
+    // 2. Run Keccak256 on the public key (without the 0x04 prefix)
+    let kexHash = aptos_hash::keccak256(public_key_without_prefix);
+    // 3. Slice the last 20 bytes (this is the Ethereum address)
+    let recovered_addr = vector::slice(&kexHash, 12, 32);
+    // 4. Remove the 0x prefix from the utf8 account address
+    let ethereum_address_without_prefix = vector::slice(&abstract_public_key.ethereum_address, 2, vector::length(&abstract_public_key.ethereum_address));
+
+    let account_address_vec = base16_utf8_to_vec_u8(ethereum_address_without_prefix);
+    // Verify that the recovered address matches the domain account identity
+    assert!(recovered_addr == account_address_vec, EADDR_MISMATCH);
+}
+
+ + + +
+ + + +## Function `authenticate` + +Authorization function for domain account abstraction. + + +
public fun authenticate(account: signer, aa_auth_data: auth_data::AbstractionAuthData): signer
+
+ + + +
+Implementation + + +
public fun authenticate(account: signer, aa_auth_data: AbstractionAuthData): signer {
+    let maybe_entry_function_payload = transaction_context::entry_function_payload();
+    if (maybe_entry_function_payload.is_some()) {
+        let entry_function_payload = maybe_entry_function_payload.destroy_some();
+        let entry_function_name = entry_function_name(&entry_function_payload);
+        authenticate_auth_data(aa_auth_data, &entry_function_name);
+        account
+    } else {
+        abort(EMISSING_ENTRY_FUNCTION_PAYLOAD)
+    }
+}
+
+ + + +
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/aptos-framework/doc/event.md b/aptos-move/framework/aptos-framework/doc/event.md index 7bf257ff41fe7..9dba7bf02fa62 100644 --- a/aptos-move/framework/aptos-framework/doc/event.md +++ b/aptos-move/framework/aptos-framework/doc/event.md @@ -10,6 +10,7 @@ events emitted to a handle and emit events to the event store. - [Struct `EventHandle`](#0x1_event_EventHandle) +- [Constants](#@Constants_0) - [Function `emit`](#0x1_event_emit) - [Function `write_module_event_to_store`](#0x1_event_write_module_event_to_store) - [Function `new_event_handle`](#0x1_event_new_event_handle) @@ -18,16 +19,16 @@ events emitted to a handle and emit events to the event store. - [Function `counter`](#0x1_event_counter) - [Function `write_to_event_store`](#0x1_event_write_to_event_store) - [Function `destroy_handle`](#0x1_event_destroy_handle) -- [Specification](#@Specification_0) +- [Specification](#@Specification_1) - [High-level Requirements](#high-level-req) - [Module-level Specification](#module-level-spec) - - [Function `emit`](#@Specification_0_emit) - - [Function `write_module_event_to_store`](#@Specification_0_write_module_event_to_store) - - [Function `emit_event`](#@Specification_0_emit_event) - - [Function `guid`](#@Specification_0_guid) - - [Function `counter`](#@Specification_0_counter) - - [Function `write_to_event_store`](#@Specification_0_write_to_event_store) - - [Function `destroy_handle`](#@Specification_0_destroy_handle) + - [Function `emit`](#@Specification_1_emit) + - [Function `write_module_event_to_store`](#@Specification_1_write_module_event_to_store) + - [Function `emit_event`](#@Specification_1_emit_event) + - [Function `guid`](#@Specification_1_guid) + - [Function `counter`](#@Specification_1_counter) + - [Function `write_to_event_store`](#@Specification_1_write_to_event_store) + - [Function `destroy_handle`](#@Specification_1_destroy_handle)
use 0x1::bcs;
@@ -73,6 +74,22 @@ A handle for an event such that:
 
 
 
+
+
+## Constants
+
+
+
+
+An event cannot be created. This error is returned by native implementations when
+- The type tag for event is too deeply nested.
+
+
+
const ECANNOT_CREATE_EVENT: u64 = 1;
+
+ + + ## Function `emit` @@ -282,7 +299,7 @@ Destroy a unique handle. - + ## Specification @@ -353,7 +370,7 @@ Destroy a unique handle. - + ### Function `emit` @@ -369,7 +386,7 @@ Destroy a unique handle. - + ### Function `write_module_event_to_store` @@ -386,7 +403,7 @@ Native function use opaque. - + ### Function `emit_event` @@ -406,7 +423,7 @@ Native function use opaque. - + ### Function `guid` @@ -424,7 +441,7 @@ Native function use opaque. - + ### Function `counter` @@ -442,7 +459,7 @@ Native function use opaque. - + ### Function `write_to_event_store` @@ -460,7 +477,7 @@ Native function use opaque. - + ### Function `destroy_handle` diff --git a/aptos-move/framework/aptos-framework/doc/execution_config.md b/aptos-move/framework/aptos-framework/doc/execution_config.md index 06be205963675..f6baf2bb7e957 100644 --- a/aptos-move/framework/aptos-framework/doc/execution_config.md +++ b/aptos-move/framework/aptos-framework/doc/execution_config.md @@ -161,7 +161,7 @@ Only used in reconfigurations to apply the pending on_new_epoch(framework: &signer) acquires ExecutionConfig { system_addresses::assert_aptos_framework(framework); if (config_buffer::does_exist<ExecutionConfig>()) { - let config = config_buffer::extract<ExecutionConfig>(); + let config = config_buffer::extract_v2<ExecutionConfig>(); if (exists<ExecutionConfig>(@aptos_framework)) { *borrow_global_mut<ExecutionConfig>(@aptos_framework) = config; } else { @@ -202,9 +202,7 @@ When setting now time must be later than last_reconfiguration_time.
pragma verify_duration_estimate = 600;
 let addr = signer::address_of(account);
-include transaction_fee::RequiresCollectedFeesPerValueLeqBlockAptosSupply;
 requires chain_status::is_genesis();
-requires exists<stake::ValidatorFees>(@aptos_framework);
 requires exists<staking_config::StakingRewardsConfig>(@aptos_framework);
 requires len(config) > 0;
 include features::spec_periodical_reward_rate_decrease_enabled() ==> staking_config::StakingRewardsConfigEnabledRequirement;
diff --git a/aptos-move/framework/aptos-framework/doc/function_info.md b/aptos-move/framework/aptos-framework/doc/function_info.md
index e55ff672c939e..d47ac5f098c11 100644
--- a/aptos-move/framework/aptos-framework/doc/function_info.md
+++ b/aptos-move/framework/aptos-framework/doc/function_info.md
@@ -16,7 +16,12 @@ The function_info
 -  [Function `is_identifier`](#0x1_function_info_is_identifier)
 -  [Function `load_function_impl`](#0x1_function_info_load_function_impl)
 -  [Specification](#@Specification_1)
+    -  [Function `new_function_info`](#@Specification_1_new_function_info)
+    -  [Function `new_function_info_from_address`](#@Specification_1_new_function_info_from_address)
+    -  [Function `check_dispatch_type_compatibility`](#@Specification_1_check_dispatch_type_compatibility)
+    -  [Function `load_module_from_function`](#@Specification_1_load_module_from_function)
     -  [Function `check_dispatch_type_compatibility_impl`](#@Specification_1_check_dispatch_type_compatibility_impl)
+    -  [Function `is_identifier`](#@Specification_1_is_identifier)
     -  [Function `load_function_impl`](#@Specification_1_load_function_impl)
 
 
@@ -142,7 +147,7 @@ Creates a new function info from names.
 
 
 
-
public(friend) fun new_function_info_from_address(module_address: address, module_name: string::String, function_name: string::String): function_info::FunctionInfo
+
public fun new_function_info_from_address(module_address: address, module_name: string::String, function_name: string::String): function_info::FunctionInfo
 
@@ -151,7 +156,7 @@ Creates a new function info from names. Implementation -
public(friend) fun new_function_info_from_address(
+
public fun new_function_info_from_address(
     module_address: address,
     module_name: String,
     function_name: String,
@@ -323,7 +328,88 @@ if such module isn't accessed previously in the transaction.
 
 
 
+
+
+
+
fun spec_is_identifier(s: vector<u8>): bool;
+
+ + + + + +### Function `new_function_info` + + +
public fun new_function_info(module_signer: &signer, module_name: string::String, function_name: string::String): function_info::FunctionInfo
+
+ + + + +
aborts_if !spec_is_identifier(string::bytes(module_name));
+aborts_if !spec_is_identifier(string::bytes(function_name));
+ensures result == FunctionInfo {
+    module_address: signer::address_of(module_signer),
+    module_name,
+    function_name,
+};
+
+ + + + + +### Function `new_function_info_from_address` + + +
public fun new_function_info_from_address(module_address: address, module_name: string::String, function_name: string::String): function_info::FunctionInfo
+
+ + + + +
aborts_if !spec_is_identifier(string::bytes(module_name));
+aborts_if !spec_is_identifier(string::bytes(function_name));
+ensures result == FunctionInfo {
+    module_address,
+    module_name,
+    function_name,
+};
+
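Since this change widens new_function_info_from_address to public, an external module can now build a FunctionInfo directly. A minimal hypothetical sketch (the address, module name, and function name below are assumptions):

```move
module my_addr::hook_registry {
    use std::string;
    use aptos_framework::function_info::{Self, FunctionInfo};

    /// Build a FunctionInfo pointing at an assumed my_addr::my_fa::derived_balance hook.
    /// Per the aborts_if conditions above, this aborts if either name is not a valid identifier.
    public fun balance_hook_info(): FunctionInfo {
        function_info::new_function_info_from_address(
            @my_addr,
            string::utf8(b"my_fa"),
            string::utf8(b"derived_balance"),
        )
    }
}
```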
+ + + + + +### Function `check_dispatch_type_compatibility` + + +
public(friend) fun check_dispatch_type_compatibility(framework_function: &function_info::FunctionInfo, dispatch_target: &function_info::FunctionInfo): bool
+
+ + + + +
pragma verify = false;
+pragma opaque;
+
+ + + + + +### Function `load_module_from_function` + + +
public(friend) fun load_module_from_function(f: &function_info::FunctionInfo)
+
+ + + +
pragma verify = false;
+pragma opaque;
 
@@ -344,6 +430,24 @@ if such module isn't accessed previously in the transaction. + + +### Function `is_identifier` + + +
fun is_identifier(s: &vector<u8>): bool
+
+ + + + +
pragma opaque;
+aborts_if [abstract] false;
+ensures [abstract] result == spec_is_identifier(s);
+
+ + + ### Function `load_function_impl` diff --git a/aptos-move/framework/aptos-framework/doc/fungible_asset.md b/aptos-move/framework/aptos-framework/doc/fungible_asset.md index 25a863d8482c9..f43390ecf8dac 100644 --- a/aptos-move/framework/aptos-framework/doc/fungible_asset.md +++ b/aptos-move/framework/aptos-framework/doc/fungible_asset.md @@ -13,15 +13,20 @@ metadata object can be any object that equipped with use 0x1::function_info; use 0x1::object; use 0x1::option; +use 0x1::permissioned_signer; use 0x1::signer; use 0x1::string;
@@ -348,6 +375,34 @@ The store object that holds fungible assets of a specific type associated with a + + + + +## Resource `DeriveSupply` + + + +
#[resource_group_member(#[group = 0x1::object::ObjectGroup])]
+struct DeriveSupply has key
+
+ + + +
+Fields + + +
+
+dispatch_function: option::Option<function_info::FunctionInfo> +
+
+ +
+
+ +
@@ -455,6 +510,62 @@ and allow the holder of TransferRef to transfer fungible assets from any account +
+Fields + + +
+
+metadata: object::Object<fungible_asset::Metadata> +
+
+ +
+
+ + +
+ + + +## Struct `RawBalanceRef` + +RawBalanceRef will be used to access the raw balance for FAs that have registered a derived_balance hook. + + +
struct RawBalanceRef has drop, store
+
+ + + +
+Fields + + +
+
+metadata: object::Object<fungible_asset::Metadata> +
+
+ +
+
+ + +
+ + + +## Struct `RawSupplyRef` + +RawSupplyRef will be used to access the raw supply for FAs that have registered a derived_supply hook. + + +
struct RawSupplyRef has drop, store
+
+ + +
Fields @@ -525,6 +636,45 @@ MutateMetadataRef can be used to directly modify the fungible asset's Metadata. +
+ + + +## Enum `WithdrawPermission` + + + +
enum WithdrawPermission has copy, drop, store
+
+ + + +
+Variants + + +
+ByStore + + +
+Fields + + +
+
+store_address: address +
+
+ +
+
+ + +
+ +
+
@@ -630,6 +780,47 @@ Emitted when a store's frozen status is updated. + + + + +## Struct `FungibleStoreDeletion` + +Module event emitted when a fungible store is deleted. + + +
#[event]
+struct FungibleStoreDeletion has drop, store
+
+ + + +
+Fields + + +
+
+store: address +
+
+ +
+
+owner: address +
+
+ +
+
+metadata: address +
+
+ +
+
+ +
@@ -892,6 +1083,16 @@ Provided derived_balance function type doesn't meet the signature requirement. + + +Provided derived_supply function type doesn't meet the signature requirement. + + +
const EDERIVED_SUPPLY_FUNCTION_SIGNATURE_MISMATCH: u64 = 33;
+
+ + + Fungible asset and store do not match. @@ -1013,6 +1214,26 @@ Fungibility is only available for non-deletable objects. + + +The balance ref and the fungible asset do not match. + + +
const ERAW_BALANCE_REF_AND_FUNGIBLE_ASSET_MISMATCH: u64 = 34;
+
+ + + + + +The supply ref and the fungible asset do not match. + + +
const ERAW_SUPPLY_REF_AND_FUNGIBLE_ASSET_MISMATCH: u64 = 35;
+
+ + + Store is disabled from sending and receiving this fungible asset. @@ -1093,6 +1314,16 @@ Provided withdraw function type doesn't meet the signature requirement. + + +The signer doesn't have permission to perform the withdraw operation. + + +
const EWITHDRAW_PERMISSION_DENIED: u64 = 36;
+
+ + + @@ -1115,7 +1346,7 @@ Provided withdraw function type doesn't meet the signature requirement. -
const MAX_SYMBOL_LENGTH: u64 = 10;
+
const MAX_SYMBOL_LENGTH: u64 = 32;
 
@@ -1403,28 +1634,13 @@ Create a fungible asset store whose transfer rule would be overloaded by the pro ) ); }); - - // Cannot register hook for APT. - assert!( - object::address_from_constructor_ref(constructor_ref) != @aptos_fungible_asset, - error::permission_denied(EAPT_NOT_DISPATCHABLE) - ); - assert!( - !object::can_generate_delete_ref(constructor_ref), - error::invalid_argument(EOBJECT_IS_DELETABLE) - ); + register_dispatch_function_sanity_check(constructor_ref); assert!( !exists<DispatchFunctionStore>( object::address_from_constructor_ref(constructor_ref) ), error::already_exists(EALREADY_REGISTERED) ); - assert!( - exists<Metadata>( - object::address_from_constructor_ref(constructor_ref) - ), - error::not_found(EFUNGIBLE_METADATA_EXISTENCE), - ); let store_obj = &object::generate_signer(constructor_ref); @@ -1444,15 +1660,14 @@ Create a fungible asset store whose transfer rule would be overloaded by the pro - + -## Function `generate_mint_ref` +## Function `register_derive_supply_dispatch_function` -Creates a mint ref that can be used to mint fungible assets from the given fungible object's constructor ref. -This can only be called at object creation time as constructor_ref is only available then. +Define the derived supply dispatch with the provided function. -
public fun generate_mint_ref(constructor_ref: &object::ConstructorRef): fungible_asset::MintRef
+
public(friend) fun register_derive_supply_dispatch_function(constructor_ref: &object::ConstructorRef, dispatch_function: option::Option<function_info::FunctionInfo>)
 
@@ -1461,9 +1676,46 @@ This can only be called at object creation time as constructor_ref is only avail Implementation -
public fun generate_mint_ref(constructor_ref: &ConstructorRef): MintRef {
-    let metadata = object::object_from_constructor_ref<Metadata>(constructor_ref);
-    MintRef { metadata }
+
public(friend) fun register_derive_supply_dispatch_function(
+    constructor_ref: &ConstructorRef,
+    dispatch_function: Option<FunctionInfo>
+) {
+    // Verify that caller type matches callee type so wrongly typed function cannot be registered.
+    option::for_each_ref(&dispatch_function, |supply_function| {
+        let function_info = function_info::new_function_info_from_address(
+            @aptos_framework,
+            string::utf8(b"dispatchable_fungible_asset"),
+            string::utf8(b"dispatchable_derived_supply"),
+        );
+        // Verify that caller type matches callee type so wrongly typed function cannot be registered.
+        assert!(
+            function_info::check_dispatch_type_compatibility(
+                &function_info,
+                supply_function
+            ),
+            error::invalid_argument(
+                EDERIVED_SUPPLY_FUNCTION_SIGNATURE_MISMATCH
+            )
+        );
+    });
+    register_dispatch_function_sanity_check(constructor_ref);
+    assert!(
+        !exists<DeriveSupply>(
+            object::address_from_constructor_ref(constructor_ref)
+        ),
+        error::already_exists(EALREADY_REGISTERED)
+    );
+
+
+    let store_obj = &object::generate_signer(constructor_ref);
+
+    // Store the overload function hook.
+    move_to<DeriveSupply>(
+        store_obj,
+        DeriveSupply {
+            dispatch_function
+        }
+    );
 }
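Since register_derive_supply_dispatch_function is public(friend), an issuer is expected to reach it through the framework's dispatchable registration path rather than call it directly; the hedged sketch below (module and function names are assumptions) only shows how the Option&lt;FunctionInfo&gt; argument would typically be produced.

```move
module my_addr::capped_fa {
    use std::option::{Self, Option};
    use std::string;
    use aptos_framework::function_info::{Self, FunctionInfo};

    /// Wrap this module's assumed `derived_supply` hook in the Option<FunctionInfo>
    /// shape expected by the registration function documented above. The hook itself
    /// must type-check against dispatchable_fungible_asset::dispatchable_derived_supply.
    public fun supply_hook(module_signer: &signer): Option<FunctionInfo> {
        option::some(function_info::new_function_info(
            module_signer,
            string::utf8(b"capped_fa"),
            string::utf8(b"derived_supply"),
        ))
    }
}
```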
 
@@ -1471,15 +1723,14 @@ This can only be called at object creation time as constructor_ref is only avail - + -## Function `generate_burn_ref` +## Function `register_dispatch_function_sanity_check` -Creates a burn ref that can be used to burn fungible assets from the given fungible object's constructor ref. -This can only be called at object creation time as constructor_ref is only available then. +Check the requirements for registering a dispatchable function. -
public fun generate_burn_ref(constructor_ref: &object::ConstructorRef): fungible_asset::BurnRef
+
fun register_dispatch_function_sanity_check(constructor_ref: &object::ConstructorRef)
 
@@ -1488,9 +1739,24 @@ This can only be called at object creation time as constructor_ref is only avail Implementation -
public fun generate_burn_ref(constructor_ref: &ConstructorRef): BurnRef {
-    let metadata = object::object_from_constructor_ref<Metadata>(constructor_ref);
-    BurnRef { metadata }
+
inline fun register_dispatch_function_sanity_check(
+    constructor_ref: &ConstructorRef,
+)  {
+    // Cannot register hook for APT.
+    assert!(
+        object::address_from_constructor_ref(constructor_ref) != @aptos_fungible_asset,
+        error::permission_denied(EAPT_NOT_DISPATCHABLE)
+    );
+    assert!(
+        !object::can_generate_delete_ref(constructor_ref),
+        error::invalid_argument(EOBJECT_IS_DELETABLE)
+    );
+    assert!(
+        exists<Metadata>(
+            object::address_from_constructor_ref(constructor_ref)
+        ),
+        error::not_found(EFUNGIBLE_METADATA_EXISTENCE),
+    );
 }
 
@@ -1498,16 +1764,15 @@ This can only be called at object creation time as constructor_ref is only avail - + -## Function `generate_transfer_ref` +## Function `generate_mint_ref` -Creates a transfer ref that can be used to freeze/unfreeze/transfer fungible assets from the given fungible -object's constructor ref. +Creates a mint ref that can be used to mint fungible assets from the given fungible object's constructor ref. This can only be called at object creation time as constructor_ref is only available then. -
public fun generate_transfer_ref(constructor_ref: &object::ConstructorRef): fungible_asset::TransferRef
+
public fun generate_mint_ref(constructor_ref: &object::ConstructorRef): fungible_asset::MintRef
 
@@ -1516,9 +1781,120 @@ This can only be called at object creation time as constructor_ref is only avail Implementation -
public fun generate_transfer_ref(constructor_ref: &ConstructorRef): TransferRef {
+
public fun generate_mint_ref(constructor_ref: &ConstructorRef): MintRef {
     let metadata = object::object_from_constructor_ref<Metadata>(constructor_ref);
-    TransferRef { metadata }
+    MintRef { metadata }
+}
+
+ + + + + + + +## Function `generate_burn_ref` + +Creates a burn ref that can be used to burn fungible assets from the given fungible object's constructor ref. +This can only be called at object creation time as constructor_ref is only available then. + + +
public fun generate_burn_ref(constructor_ref: &object::ConstructorRef): fungible_asset::BurnRef
+
+ + + +
+Implementation + + +
public fun generate_burn_ref(constructor_ref: &ConstructorRef): BurnRef {
+    let metadata = object::object_from_constructor_ref<Metadata>(constructor_ref);
+    BurnRef { metadata }
+}
+
+ + + +
+ + + +## Function `generate_transfer_ref` + +Creates a transfer ref that can be used to freeze/unfreeze/transfer fungible assets from the given fungible +object's constructor ref. +This can only be called at object creation time as constructor_ref is only available then. + + +
public fun generate_transfer_ref(constructor_ref: &object::ConstructorRef): fungible_asset::TransferRef
+
+ + + +
+Implementation + + +
public fun generate_transfer_ref(constructor_ref: &ConstructorRef): TransferRef {
+    let metadata = object::object_from_constructor_ref<Metadata>(constructor_ref);
+    TransferRef { metadata }
+}
+
+ + + +
+ + + +## Function `generate_raw_balance_ref` + +Creates a balance ref that can be used to access raw balance of fungible assets from the given fungible +object's constructor ref. +This can only be called at object creation time as constructor_ref is only available then. + + +
public fun generate_raw_balance_ref(constructor_ref: &object::ConstructorRef): fungible_asset::RawBalanceRef
+
+ + + +
+Implementation + + +
public fun generate_raw_balance_ref(constructor_ref: &ConstructorRef): RawBalanceRef {
+    let metadata = object::object_from_constructor_ref<Metadata>(constructor_ref);
+    RawBalanceRef { metadata }
+}
+
+ + + +
+ + + +## Function `generate_raw_supply_ref` + +Creates a supply ref that can be used to access raw supply of fungible assets from the given fungible +object's constructor ref. +This can only be called at object creation time as constructor_ref is only available then. + + +
public fun generate_raw_supply_ref(constructor_ref: &object::ConstructorRef): fungible_asset::RawSupplyRef
+
+ + + +
+Implementation + + +
public fun generate_raw_supply_ref(constructor_ref: &ConstructorRef): RawSupplyRef {
+    let metadata = object::object_from_constructor_ref<Metadata>(constructor_ref);
+    RawSupplyRef { metadata }
 }
 
@@ -1560,6 +1936,9 @@ This can only be called at object creation time as constructor_ref is only avail Get the current supply from the metadata object. +Note: This function will abort on FAs that have a derived_supply hook set up. +Use dispatchable_fungible_asset::supply instead if you intend to work with those FAs. +
#[view]
 public fun supply<T: key>(metadata: object::Object<T>): option::Option<u128>
@@ -1572,10 +1951,38 @@ Get the current supply from the metadata object.
 
 
 
public fun supply<T: key>(metadata: Object<T>): Option<u128> acquires Supply, ConcurrentSupply {
+    assert!(
+        !has_supply_dispatch_function(object::object_address(&metadata)),
+        error::invalid_argument(EINVALID_DISPATCHABLE_OPERATIONS)
+    );
+    supply_impl(metadata)
+}
+
+ + + +
+ + + +## Function `supply_impl` + + + +
fun supply_impl<T: key>(metadata: object::Object<T>): option::Option<u128>
+
+ + + +
+Implementation + + +
fun supply_impl<T: key>(metadata: Object<T>): Option<u128> acquires Supply, ConcurrentSupply {
     let metadata_address = object::object_address(&metadata);
     if (exists<ConcurrentSupply>(metadata_address)) {
         let supply = borrow_global<ConcurrentSupply>(metadata_address);
-        option::some(aggregator_v2::read(&supply.current))
+        option::some(supply.current.read())
     } else if (exists<Supply>(metadata_address)) {
         let supply = borrow_global<Supply>(metadata_address);
         option::some(supply.current)
@@ -1611,7 +2018,7 @@ If supply is unlimited (or set explicitly to MAX_U128), none is returned
     let metadata_address = object::object_address(&metadata);
     if (exists<ConcurrentSupply>(metadata_address)) {
         let supply = borrow_global<ConcurrentSupply>(metadata_address);
-        let max_value = aggregator_v2::max_value(&supply.current);
+        let max_value = supply.current.max_value();
         if (max_value == MAX_U128) {
             option::none()
         } else {
@@ -1945,6 +2352,9 @@ Return the amount of a given fungible asset.
 
 Get the balance of a given store.
 
+Note: This function will abort on FAs that have a derived_balance hook set up.
+Use dispatchable_fungible_asset::balance instead if you intend to work with those FAs.
+
 
 
#[view]
 public fun balance<T: key>(store: object::Object<T>): u64
@@ -1956,13 +2366,42 @@ Get the balance of a given store.
 Implementation
 
 
-
public fun balance<T: key>(store: Object<T>): u64 acquires FungibleStore, ConcurrentFungibleBalance {
+
public fun balance<T: key>(store: Object<T>): u64 acquires FungibleStore, ConcurrentFungibleBalance, DispatchFunctionStore {
+    let fa_store = borrow_store_resource(&store);
+    assert!(
+        !has_balance_dispatch_function(fa_store.metadata),
+        error::invalid_argument(EINVALID_DISPATCHABLE_OPERATIONS)
+    );
+    balance_impl(store)
+}
+
+ + + +
+ + + +## Function `balance_impl` + + + +
fun balance_impl<T: key>(store: object::Object<T>): u64
+
+ + + +
+Implementation + + +
fun balance_impl<T: key>(store: Object<T>): u64 acquires FungibleStore, ConcurrentFungibleBalance {
     let store_addr = object::object_address(&store);
     if (store_exists_inline(store_addr)) {
         let store_balance = borrow_store_resource(&store).balance;
         if (store_balance == 0 && concurrent_fungible_balance_exists_inline(store_addr)) {
             let balance_resource = borrow_global<ConcurrentFungibleBalance>(store_addr);
-            aggregator_v2::read(&balance_resource.balance)
+            balance_resource.balance.read()
         } else {
             store_balance
         }
@@ -2024,7 +2463,7 @@ Check whether the balance of a given store is >= amount.
         let store_balance = borrow_global<FungibleStore>(store_addr).balance;
         if (store_balance == 0 && concurrent_fungible_balance_exists_inline(store_addr)) {
             let balance_resource = borrow_global<ConcurrentFungibleBalance>(store_addr);
-            aggregator_v2::is_at_least(&balance_resource.balance, amount)
+            balance_resource.balance.is_at_least(amount)
         } else {
             store_balance >= amount
         }
@@ -2213,6 +2652,65 @@ Return whether a fungible asset type is dispatchable.
 
 
 
+
+ + + +## Function `has_balance_dispatch_function` + + + +
fun has_balance_dispatch_function(metadata: object::Object<fungible_asset::Metadata>): bool
+
+ + + +
+Implementation + + +
fun has_balance_dispatch_function(metadata: Object<Metadata>): bool acquires DispatchFunctionStore {
+    let metadata_addr = object::object_address(&metadata);
+    // Short circuit on APT for better perf
+    if (metadata_addr != @aptos_fungible_asset && exists<DispatchFunctionStore>(metadata_addr)) {
+        option::is_some(&borrow_global<DispatchFunctionStore>(metadata_addr).derived_balance_function)
+    } else {
+        false
+    }
+}
+
+ + + +
+ + + +## Function `has_supply_dispatch_function` + + + +
fun has_supply_dispatch_function(metadata_addr: address): bool
+
+ + + +
+Implementation + + +
fun has_supply_dispatch_function(metadata_addr: address): bool {
+    // Short circuit on APT for better perf
+    if (metadata_addr != @aptos_fungible_asset) {
+        exists<DeriveSupply>(metadata_addr)
+    } else {
+        false
+    }
+}
+
+ + +
@@ -2243,6 +2741,35 @@ Return whether a fungible asset type is dispatchable. + + + + +## Function `derived_supply_dispatch_function` + + + +
public(friend) fun derived_supply_dispatch_function<T: key>(metadata: object::Object<T>): option::Option<function_info::FunctionInfo>
+
+ + + +
+Implementation + + +
public(friend) fun derived_supply_dispatch_function<T: key>(metadata: Object<T>): Option<FunctionInfo> acquires DeriveSupply {
+    let metadata_addr = object::object_address(&metadata);
+    if (exists<DeriveSupply>(metadata_addr)) {
+        borrow_global<DeriveSupply>(metadata_addr).dispatch_function
+    } else {
+        option::none()
+    }
+}
+
+ + +
@@ -2376,6 +2903,9 @@ Get the underlying metadata object from the dispatchable_fungible_asset::transfer. You should use +that function unless you DO NOT want to support fungible assets with dispatchable hooks. +
public entry fun transfer<T: key>(sender: &signer, from: object::Object<T>, to: object::Object<T>, amount: u64)
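A hedged usage sketch for the note above (module name is an assumption; `from` must be owned by `sender`): a plain, hook-free transfer between two existing stores. When the FA may carry dispatch hooks, dispatchable_fungible_asset::transfer is the variant to call instead.

```move
module my_addr::pay {
    use aptos_framework::fungible_asset::{Self, FungibleStore};
    use aptos_framework::object::Object;

    /// Move `amount` units from `from` (owned by `sender`) into `to`.
    public entry fun pay(sender: &signer, from: Object<FungibleStore>, to: Object<FungibleStore>, amount: u64) {
        fungible_asset::transfer(sender, from, to, amount);
    }
}
```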
 
@@ -2464,15 +2994,15 @@ Used to delete a store. Requires the store to be completely empty prior to remo
public fun remove_store(delete_ref: &DeleteRef) acquires FungibleStore, FungibleAssetEvents, ConcurrentFungibleBalance {
-    let store = &object::object_from_delete_ref<FungibleStore>(delete_ref);
-    let addr = object::object_address(store);
-    let FungibleStore { metadata: _, balance, frozen: _ }
+    let store = object::object_from_delete_ref<FungibleStore>(delete_ref);
+    let addr = object::object_address(&store);
+    let FungibleStore { metadata, balance, frozen: _}
         = move_from<FungibleStore>(addr);
     assert!(balance == 0, error::permission_denied(EBALANCE_IS_NOT_ZERO));
 
     if (concurrent_fungible_balance_exists_inline(addr)) {
         let ConcurrentFungibleBalance { balance } = move_from<ConcurrentFungibleBalance>(addr);
-        assert!(aggregator_v2::read(&balance) == 0, error::permission_denied(EBALANCE_IS_NOT_ZERO));
+        assert!(balance.read() == 0, error::permission_denied(EBALANCE_IS_NOT_ZERO));
     };
 
     // Cleanup deprecated event handles if exist.
@@ -2486,6 +3016,11 @@ Used to delete a store.  Requires the store to be completely empty prior to remo
         event::destroy_handle(withdraw_events);
         event::destroy_handle(frozen_events);
     };
+    event::emit(FungibleStoreDeletion {
+        store: addr,
+        owner: object::owner(store),
+        metadata: object::object_address(&metadata),
+    });
 }
 
@@ -2499,6 +3034,9 @@ Used to delete a store. Requires the store to be completely empty prior to remo Withdraw amount of the fungible asset from store by the owner. +Note: This function can be in-place replaced by dispatchable_fungible_asset::withdraw. You should use +that function unless you DO NOT want to support fungible assets with dispatchable hooks. +
public fun withdraw<T: key>(owner: &signer, store: object::Object<T>, amount: u64): fungible_asset::FungibleAsset
 
@@ -2515,7 +3053,70 @@ Withdraw amount of the fungible asset from store by th amount: u64, ): FungibleAsset acquires FungibleStore, DispatchFunctionStore, ConcurrentFungibleBalance { withdraw_sanity_check(owner, store, true); - withdraw_internal(object::object_address(&store), amount) + withdraw_permission_check(owner, store, amount); + unchecked_withdraw(object::object_address(&store), amount) +} +
+ + + + + + + +## Function `withdraw_permission_check` + +Check the permission for withdraw operation. + + +
public(friend) fun withdraw_permission_check<T: key>(owner: &signer, store: object::Object<T>, amount: u64)
+
+ + + +
+Implementation + + +
public(friend) fun withdraw_permission_check<T: key>(
+    owner: &signer,
+    store: Object<T>,
+    amount: u64,
+) {
+    assert!(permissioned_signer::check_permission_consume(owner, amount as u256, WithdrawPermission::ByStore {
+        store_address: object::object_address(&store),
+    }), error::permission_denied(EWITHDRAW_PERMISSION_DENIED));
+}
+
+ + + +
+ + + +## Function `withdraw_permission_check_by_address` + +Check the permission for withdraw operation. + + +
public(friend) fun withdraw_permission_check_by_address(owner: &signer, store_address: address, amount: u64)
+
+ + + +
+Implementation + + +
public(friend) fun withdraw_permission_check_by_address(
+    owner: &signer,
+    store_address: address,
+    amount: u64,
+) {
+    assert!(permissioned_signer::check_permission_consume(owner, amount as u256, WithdrawPermission::ByStore {
+        store_address,
+    }), error::permission_denied(EWITHDRAW_PERMISSION_DENIED));
 }
 
@@ -2544,7 +3145,39 @@ Check the permission for withdraw operation. store: Object<T>, abort_on_dispatch: bool, ) acquires FungibleStore, DispatchFunctionStore { - assert!(object::owns(store, signer::address_of(owner)), error::permission_denied(ENOT_STORE_OWNER)); + withdraw_sanity_check_impl( + signer::address_of(owner), + store, + abort_on_dispatch, + ) +} +
+ + + + + + + +## Function `withdraw_sanity_check_impl` + + + +
fun withdraw_sanity_check_impl<T: key>(owner_address: address, store: object::Object<T>, abort_on_dispatch: bool)
+
+ + + +
+Implementation + + +
inline fun withdraw_sanity_check_impl<T: key>(
+    owner_address: address,
+    store: Object<T>,
+    abort_on_dispatch: bool,
+) acquires FungibleStore, DispatchFunctionStore {
+    assert!(object::owns(store, owner_address), error::permission_denied(ENOT_STORE_OWNER));
     let fa_store = borrow_store_resource(&store);
     assert!(
         !abort_on_dispatch || !has_withdraw_dispatch_function(fa_store.metadata),
@@ -2597,6 +3230,9 @@ Deposit amount of the fungible asset to store.
 
 Deposit amount of the fungible asset to store.
 
+Note: This function can be replaced in place by dispatchable_fungible_asset::deposit. You should use
+that function unless you DO NOT want to support fungible assets with dispatchable hooks.
+
 
 
public fun deposit<T: key>(store: object::Object<T>, fa: fungible_asset::FungibleAsset)
 
@@ -2609,7 +3245,7 @@ Deposit amount of the fungible asset to store.
public fun deposit<T: key>(store: Object<T>, fa: FungibleAsset) acquires FungibleStore, DispatchFunctionStore, ConcurrentFungibleBalance {
     deposit_sanity_check(store, true);
-    deposit_internal(object::object_address(&store), fa);
+    unchecked_deposit(object::object_address(&store), fa);
 }
 
@@ -2694,7 +3330,7 @@ Mint the specified amount of the fungible asset to a destination st
public fun mint_to<T: key>(ref: &MintRef, store: Object<T>, amount: u64)
 acquires FungibleStore, Supply, ConcurrentSupply, DispatchFunctionStore, ConcurrentFungibleBalance {
     deposit_sanity_check(store, false);
-    deposit_internal(object::object_address(&store), mint(ref, amount));
+    unchecked_deposit(object::object_address(&store), mint(ref, amount));
 }
 
@@ -2848,7 +3484,7 @@ Burn the amount of the fungible asset from the given store. amount: u64 ) acquires FungibleStore, Supply, ConcurrentSupply, ConcurrentFungibleBalance { // ref metadata match is checked in burn() call - burn(ref, withdraw_internal(object::object_address(&store), amount)); + burn(ref, unchecked_withdraw(object::object_address(&store), amount)); }
@@ -2856,13 +3492,14 @@ Burn the amount of the fungible asset from the given store.
- + -## Function `address_burn_from` +## Function `address_burn_from_for_gas` +Burn the amount of the fungible asset from the given store for gas charge. -
public(friend) fun address_burn_from(ref: &fungible_asset::BurnRef, store_addr: address, amount: u64)
+
public(friend) fun address_burn_from_for_gas(ref: &fungible_asset::BurnRef, store_addr: address, amount: u64)
 
@@ -2871,13 +3508,13 @@ Burn the amount of the fungible asset from the given store. Implementation -
public(friend) fun address_burn_from(
+
public(friend) fun address_burn_from_for_gas(
     ref: &BurnRef,
     store_addr: address,
     amount: u64
 ) acquires FungibleStore, Supply, ConcurrentSupply, ConcurrentFungibleBalance {
     // ref metadata match is checked in burn() call
-    burn(ref, withdraw_internal(store_addr, amount));
+    burn(ref, unchecked_withdraw_with_no_events(store_addr, amount));
 }
 
@@ -2910,7 +3547,7 @@ Withdraw amount of the fungible asset from the store i ref.metadata == store_metadata(store), error::invalid_argument(ETRANSFER_REF_AND_STORE_MISMATCH), ); - withdraw_internal(object::object_address(&store), amount) + unchecked_withdraw(object::object_address(&store), amount) }
@@ -2943,7 +3580,7 @@ Deposit the fungible asset into the store ignoring frozenerror::invalid_argument(ETRANSFER_REF_AND_FUNGIBLE_ASSET_MISMATCH) ); - deposit_internal(object::object_address(&store), fa); + unchecked_deposit(object::object_address(&store), fa); }
@@ -2980,6 +3617,70 @@ Transfer amount of the fungible asset with + +## Function `balance_with_ref` + +Access raw balance of a store using RawBalanceRef + + +
public fun balance_with_ref<T: key>(ref: &fungible_asset::RawBalanceRef, store: object::Object<T>): u64
+
+ + + +
+Implementation + + +
public fun balance_with_ref<T: key>(
+    ref: &RawBalanceRef,
+    store: Object<T>,
+): u64 acquires FungibleStore, ConcurrentFungibleBalance {
+    assert!(
+        ref.metadata == store_metadata(store),
+        error::invalid_argument(ERAW_BALANCE_REF_AND_FUNGIBLE_ASSET_MISMATCH)
+    );
+    balance_impl(store)
+}
+
+ + + +
+ + + +## Function `supply_with_ref` + +Access raw supply of a FA using RawSupplyRef + + +
public fun supply_with_ref<T: key>(ref: &fungible_asset::RawSupplyRef, metadata: object::Object<T>): option::Option<u128>
+
+ + + +
+Implementation + + +
public fun supply_with_ref<T: key>(
+    ref: &RawSupplyRef,
+    metadata: Object<T>,
+): Option<u128> acquires Supply, ConcurrentSupply {
+    assert!(
+        object::object_address(&ref.metadata) == object::object_address(&metadata),
+        error::invalid_argument(ERAW_BALANCE_REF_AND_FUNGIBLE_ASSET_MISMATCH)
+    );
+    supply_impl(metadata)
+}
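Putting the two refs together, a minimal hypothetical sketch (module name, address, and resource layout are assumptions): the issuer captures the refs at creation time and can later read the raw balance and raw supply even when the FA's dispatch hooks would otherwise be consulted.

```move
module my_addr::raw_ref_example {
    use std::option::Option;
    use aptos_framework::fungible_asset::{Self, FungibleStore, Metadata, RawBalanceRef, RawSupplyRef};
    use aptos_framework::object::{ConstructorRef, Object};

    /// Holds the refs generated while the FA's constructor ref was still available.
    struct RawRefs has key {
        balance_ref: RawBalanceRef,
        supply_ref: RawSupplyRef,
    }

    /// Call at FA creation time, before the constructor ref goes out of scope.
    public fun capture_refs(issuer: &signer, constructor_ref: &ConstructorRef) {
        move_to(issuer, RawRefs {
            balance_ref: fungible_asset::generate_raw_balance_ref(constructor_ref),
            supply_ref: fungible_asset::generate_raw_supply_ref(constructor_ref),
        });
    }

    /// Raw (un-dispatched) balance of a store of this FA.
    public fun raw_balance(issuer: address, store: Object<FungibleStore>): u64 acquires RawRefs {
        fungible_asset::balance_with_ref(&borrow_global<RawRefs>(issuer).balance_ref, store)
    }

    /// Raw (un-dispatched) supply of this FA.
    public fun raw_supply(issuer: address, metadata: Object<Metadata>): Option<u128> acquires RawRefs {
        fungible_asset::supply_with_ref(&borrow_global<RawRefs>(issuer).supply_ref, metadata)
    }
}
```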
+
+ + +
@@ -3010,19 +3711,29 @@ Mutate specified fields of the fungible asset's Metadata>(metadata_address); if (option::is_some(&name)){ - mutable_metadata.name = option::extract(&mut name); + let name = option::extract(&mut name); + assert!(string::length(&name) <= MAX_NAME_LENGTH, error::out_of_range(ENAME_TOO_LONG)); + mutable_metadata.name = name; }; if (option::is_some(&symbol)){ - mutable_metadata.symbol = option::extract(&mut symbol); + let symbol = option::extract(&mut symbol); + assert!(string::length(&symbol) <= MAX_SYMBOL_LENGTH, error::out_of_range(ESYMBOL_TOO_LONG)); + mutable_metadata.symbol = symbol; }; if (option::is_some(&decimals)){ - mutable_metadata.decimals = option::extract(&mut decimals); + let decimals = option::extract(&mut decimals); + assert!(decimals <= MAX_DECIMALS, error::out_of_range(EDECIMALS_TOO_LARGE)); + mutable_metadata.decimals = decimals; }; if (option::is_some(&icon_uri)){ - mutable_metadata.icon_uri = option::extract(&mut icon_uri); + let icon_uri = option::extract(&mut icon_uri); + assert!(string::length(&icon_uri) <= MAX_URI_LENGTH, error::out_of_range(EURI_TOO_LONG)); + mutable_metadata.icon_uri = icon_uri; }; if (option::is_some(&project_uri)){ - mutable_metadata.project_uri = option::extract(&mut project_uri); + let project_uri = option::extract(&mut project_uri); + assert!(string::length(&project_uri) <= MAX_URI_LENGTH, error::out_of_range(EURI_TOO_LONG)); + mutable_metadata.project_uri = project_uri; }; }
@@ -3144,13 +3855,13 @@ Destroy an empty fungible asset. - + -## Function `deposit_internal` +## Function `unchecked_deposit_with_no_events_inline` -
public(friend) fun deposit_internal(store_addr: address, fa: fungible_asset::FungibleAsset)
+
fun unchecked_deposit_with_no_events_inline(store_addr: address, fa: fungible_asset::FungibleAsset): u64
 
@@ -3159,22 +3870,81 @@ Destroy an empty fungible asset. Implementation -
public(friend) fun deposit_internal(store_addr: address, fa: FungibleAsset) acquires FungibleStore, ConcurrentFungibleBalance {
+
inline fun unchecked_deposit_with_no_events_inline(
+    store_addr: address,
+    fa: FungibleAsset
+): u64 acquires FungibleStore, ConcurrentFungibleBalance {
     let FungibleAsset { metadata, amount } = fa;
-    if (amount == 0) return;
-
     assert!(exists<FungibleStore>(store_addr), error::not_found(EFUNGIBLE_STORE_EXISTENCE));
     let store = borrow_global_mut<FungibleStore>(store_addr);
     assert!(metadata == store.metadata, error::invalid_argument(EFUNGIBLE_ASSET_AND_STORE_MISMATCH));
 
-    if (store.balance == 0 && concurrent_fungible_balance_exists_inline(store_addr)) {
-        let balance_resource = borrow_global_mut<ConcurrentFungibleBalance>(store_addr);
-        aggregator_v2::add(&mut balance_resource.balance, amount);
-    } else {
-        store.balance = store.balance + amount;
+    if (amount != 0) {
+        if (store.balance == 0 && concurrent_fungible_balance_exists_inline(store_addr)) {
+            let balance_resource = borrow_global_mut<ConcurrentFungibleBalance>(store_addr);
+            balance_resource.balance.add(amount);
+        } else {
+            store.balance = store.balance + amount;
+        };
     };
+    amount
+}
+
+ + + + + + + +## Function `unchecked_deposit` + + + +
public(friend) fun unchecked_deposit(store_addr: address, fa: fungible_asset::FungibleAsset)
+
+ + + +
+Implementation + + +
public(friend) fun unchecked_deposit(
+    store_addr: address,
+    fa: FungibleAsset
+) acquires FungibleStore, ConcurrentFungibleBalance {
+    let amount = unchecked_deposit_with_no_events_inline(store_addr, fa);
+    if (amount != 0) {
+        event::emit(Deposit { store: store_addr, amount });
+    }
+}
+
+ + + +
+ + + +## Function `unchecked_deposit_with_no_events` - event::emit(Deposit { store: store_addr, amount }); + + +
public(friend) fun unchecked_deposit_with_no_events(store_addr: address, fa: fungible_asset::FungibleAsset)
+
+ + + +
+Implementation + + +
public(friend) fun unchecked_deposit_with_no_events(
+    store_addr: address,
+    fa: FungibleAsset
+) acquires FungibleStore, ConcurrentFungibleBalance {
+    unchecked_deposit_with_no_events_inline(store_addr, fa);
 }
 
@@ -3182,14 +3952,14 @@ Destroy an empty fungible asset.
- + -## Function `withdraw_internal` +## Function `unchecked_withdraw` -Extract amount of the fungible asset from store. +Extract amount of the fungible asset from store emitting event. -
public(friend) fun withdraw_internal(store_addr: address, amount: u64): fungible_asset::FungibleAsset
+
public(friend) fun unchecked_withdraw(store_addr: address, amount: u64): fungible_asset::FungibleAsset
 
@@ -3198,7 +3968,39 @@ Extract amount of the fungible asset from store. Implementation -
public(friend) fun withdraw_internal(
+
public(friend) fun unchecked_withdraw(
+    store_addr: address,
+    amount: u64
+): FungibleAsset acquires FungibleStore, ConcurrentFungibleBalance {
+    let fa = unchecked_withdraw_with_no_events(store_addr, amount);
+    if (amount != 0) {
+        event::emit<Withdraw>(Withdraw { store: store_addr, amount });
+    };
+    fa
+}
+
+ + + + + + + +## Function `unchecked_withdraw_with_no_events` + +Extract amount of the fungible asset from store w/o emitting event. + + +
fun unchecked_withdraw_with_no_events(store_addr: address, amount: u64): fungible_asset::FungibleAsset
+
+ + + +
+Implementation + + +
inline fun unchecked_withdraw_with_no_events(
     store_addr: address,
     amount: u64,
 ): FungibleAsset acquires FungibleStore, ConcurrentFungibleBalance {
@@ -3210,15 +4012,13 @@ Extract amount of the fungible asset from store.
         if (store.balance == 0 && concurrent_fungible_balance_exists_inline(store_addr)) {
             let balance_resource = borrow_global_mut<ConcurrentFungibleBalance>(store_addr);
             assert!(
-                aggregator_v2::try_sub(&mut balance_resource.balance, amount),
+                balance_resource.balance.try_sub(amount),
                 error::invalid_argument(EINSUFFICIENT_BALANCE)
             );
         } else {
             assert!(store.balance >= amount, error::invalid_argument(EINSUFFICIENT_BALANCE));
             store.balance = store.balance - amount;
         };
-
-        event::emit<Withdraw>(Withdraw { store: store_addr, amount });
     };
     FungibleAsset { metadata, amount }
 }
@@ -3253,7 +4053,7 @@ Increase the supply of a fungible asset by minting.
     if (exists<ConcurrentSupply>(metadata_address)) {
         let supply = borrow_global_mut<ConcurrentSupply>(metadata_address);
         assert!(
-            aggregator_v2::try_add(&mut supply.current, (amount as u128)),
+            supply.current.try_add(amount as u128),
             error::out_of_range(EMAX_SUPPLY_EXCEEDED)
         );
     } else if (exists<Supply>(metadata_address)) {
@@ -3302,7 +4102,7 @@ Decrease the supply of a fungible asset by burning.
         let supply = borrow_global_mut<ConcurrentSupply>(metadata_address);
 
         assert!(
-            aggregator_v2::try_sub(&mut supply.current, (amount as u128)),
+            supply.current.try_sub(amount as u128),
             error::out_of_range(ESUPPLY_UNDERFLOW)
         );
     } else if (exists<Supply>(metadata_address)) {
@@ -3512,6 +4312,137 @@ Ensure a known 
+
+## Function `grant_permission_by_store`
+
+Permission management.
+
+The master signer grants a permissioned signer the ability to withdraw a given amount of fungible asset.
+
+
+
public fun grant_permission_by_store<T: key>(master: &signer, permissioned: &signer, store: object::Object<T>, amount: u64)
+
+ + + +
+Implementation + + +
public fun grant_permission_by_store<T: key>(
+    master: &signer,
+    permissioned: &signer,
+    store: Object<T>,
+    amount: u64
+) {
+    permissioned_signer::authorize_increase(
+        master,
+        permissioned,
+        amount as u256,
+        WithdrawPermission::ByStore {
+            store_address: object::object_address(&store),
+        }
+    )
+}
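A hedged usage sketch (module name is an assumption, and obtaining the permissioned signer via 0x1::permissioned_signer is outside this diff): the master signer caps how much a permissioned signer may later withdraw from one specific store, which is the budget that withdraw_permission_check consumes against.

```move
module my_addr::treasury_delegation {
    use aptos_framework::fungible_asset::{Self, FungibleStore};
    use aptos_framework::object::Object;

    /// Allow `permissioned` (a permissioned signer derived from `master`, not shown)
    /// to withdraw at most `amount` units from `store`.
    public fun allow_bounded_withdraw(
        master: &signer,
        permissioned: &signer,
        store: Object<FungibleStore>,
        amount: u64,
    ) {
        fungible_asset::grant_permission_by_store(master, permissioned, store, amount);
    }
}
```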
+
+ + + +
+ + + +## Function `grant_permission_by_address` + + + +
public(friend) fun grant_permission_by_address(master: &signer, permissioned: &signer, store_address: address, amount: u64)
+
+ + + +
+Implementation + + +
public(friend) fun grant_permission_by_address(
+    master: &signer,
+    permissioned: &signer,
+    store_address: address,
+    amount: u64
+) {
+    permissioned_signer::authorize_increase(
+        master,
+        permissioned,
+        amount as u256,
+        WithdrawPermission::ByStore { store_address }
+    )
+}
+
+ + + +
+ + + +## Function `refill_permission` + + + +
public(friend) fun refill_permission(permissioned: &signer, amount: u64, store_address: address)
+
+ + + +
+Implementation + + +
public(friend) fun refill_permission(
+    permissioned: &signer,
+    amount: u64,
+    store_address: address,
+) {
+    permissioned_signer::increase_limit(
+        permissioned,
+        amount as u256,
+        WithdrawPermission::ByStore { store_address }
+    )
+}
+
+ + + +
+ + + +## Function `revoke_permission` + +Remove permissions from a permissioned signer. + + +
#[deprecated]
+public fun revoke_permission(_permissioned: &signer, _token_type: object::Object<fungible_asset::Metadata>)
+
+ + + +
+Implementation + + +
public fun revoke_permission(_permissioned: &signer, _token_type: Object<Metadata>) {
+    abort 0
+}
+
+ + +
diff --git a/aptos-move/framework/aptos-framework/doc/gas_schedule.md b/aptos-move/framework/aptos-framework/doc/gas_schedule.md index ea4a92886698f..faadf330cb4ad 100644 --- a/aptos-move/framework/aptos-framework/doc/gas_schedule.md +++ b/aptos-move/framework/aptos-framework/doc/gas_schedule.md @@ -362,7 +362,7 @@ Only used in reconfigurations to apply the pending on_new_epoch(framework: &signer) acquires GasScheduleV2 { system_addresses::assert_aptos_framework(framework); if (config_buffer::does_exist<GasScheduleV2>()) { - let new_gas_schedule = config_buffer::extract<GasScheduleV2>(); + let new_gas_schedule = config_buffer::extract_v2<GasScheduleV2>(); if (exists<GasScheduleV2>(@aptos_framework)) { *borrow_global_mut<GasScheduleV2>(@aptos_framework) = new_gas_schedule; } else { @@ -525,10 +525,8 @@ Only used in reconfigurations to apply the pending stake::ValidatorFees>(@aptos_framework); requires exists<CoinInfo<AptosCoin>>(@aptos_framework); requires chain_status::is_genesis(); -include transaction_fee::RequiresCollectedFeesPerValueLeqBlockAptosSupply; include staking_config::StakingRewardsConfigRequirement; // This enforces high-level requirement 2: include system_addresses::AbortsIfNotAptosFramework{ account: aptos_framework }; @@ -621,10 +619,8 @@ Only used in reconfigurations to apply the pending stake::ValidatorFees>(@aptos_framework); requires exists<CoinInfo<AptosCoin>>(@aptos_framework); include system_addresses::AbortsIfNotAptosFramework{ account: aptos_framework }; -include transaction_fee::RequiresCollectedFeesPerValueLeqBlockAptosSupply; include staking_config::StakingRewardsConfigRequirement; aborts_if !exists<StorageGasConfig>(@aptos_framework); ensures global<StorageGasConfig>(@aptos_framework) == config; diff --git a/aptos-move/framework/aptos-framework/doc/genesis.md b/aptos-move/framework/aptos-framework/doc/genesis.md index ded5b7e242f39..9245805fe983a 100644 --- a/aptos-move/framework/aptos-framework/doc/genesis.md +++ b/aptos-move/framework/aptos-framework/doc/genesis.md @@ -12,7 +12,6 @@ - [Constants](#@Constants_0) - [Function `initialize`](#0x1_genesis_initialize) - [Function `initialize_aptos_coin`](#0x1_genesis_initialize_aptos_coin) -- [Function `initialize_governed_gas_pool`](#0x1_genesis_initialize_governed_gas_pool) - [Function `initialize_core_resources_and_aptos_coin`](#0x1_genesis_initialize_core_resources_and_aptos_coin) - [Function `create_accounts`](#0x1_genesis_create_accounts) - [Function `create_account`](#0x1_genesis_create_account) @@ -22,7 +21,6 @@ - [Function `create_initialize_validator`](#0x1_genesis_create_initialize_validator) - [Function `initialize_validator`](#0x1_genesis_initialize_validator) - [Function `set_genesis_end`](#0x1_genesis_set_genesis_end) -- [Function `initialize_for_verification`](#0x1_genesis_initialize_for_verification) - [Specification](#@Specification_1) - [High-level Requirements](#high-level-req) - [Module-level Specification](#module-level-spec) @@ -31,8 +29,8 @@ - [Function `create_initialize_validators_with_commission`](#@Specification_1_create_initialize_validators_with_commission) - [Function `create_initialize_validators`](#@Specification_1_create_initialize_validators) - [Function `create_initialize_validator`](#@Specification_1_create_initialize_validator) + - [Function `initialize_validator`](#@Specification_1_initialize_validator) - [Function `set_genesis_end`](#@Specification_1_set_genesis_end) - - [Function `initialize_for_verification`](#@Specification_1_initialize_for_verification)
use 0x1::account;
@@ -48,11 +46,9 @@
 use 0x1::create_signer;
 use 0x1::error;
 use 0x1::execution_config;
-use 0x1::features;
 use 0x1::fixed_point32;
 use 0x1::gas_schedule;
-use 0x1::governed_gas_pool;
-use 0x1::native_bridge;
+use 0x1::nonce_validation;
 use 0x1::reconfiguration;
 use 0x1::simple_map;
 use 0x1::stake;
@@ -330,7 +326,6 @@ Genesis step 1: Initialize aptos framework account and core modules on chain.
         b"multi_agent_script_prologue",
         b"epilogue",
     );
-
     // Give the decentralized on-chain governance control over the core framework account.
     aptos_governance::store_signer_cap(&aptos_framework_account, @aptos_framework, aptos_framework_signer_cap);
 
@@ -346,6 +341,7 @@ Genesis step 1: Initialize aptos framework account and core modules on chain.
     execution_config::set(&aptos_framework_account, execution_config);
     version::initialize(&aptos_framework_account, initial_version);
     stake::initialize(&aptos_framework_account);
+    timestamp::set_time_has_started(&aptos_framework_account);
     staking_config::initialize(
         &aptos_framework_account,
         minimum_stake,
@@ -361,14 +357,12 @@ Genesis step 1: Initialize aptos framework account and core modules on chain.
 
     // Ensure we can create aggregators for supply, but not enable it for common use just yet.
     aggregator_factory::initialize_aggregator_factory(&aptos_framework_account);
-    coin::initialize_supply_config(&aptos_framework_account);
 
     chain_id::initialize(&aptos_framework_account, chain_id);
     reconfiguration::initialize(&aptos_framework_account);
     block::initialize(&aptos_framework_account, epoch_interval_microsecs);
     state_storage::initialize(&aptos_framework_account);
-    timestamp::set_time_has_started(&aptos_framework_account);
-    native_bridge::initialize(&aptos_framework_account);
+    nonce_validation::initialize(&aptos_framework_account);
 }
 
@@ -409,33 +403,6 @@ Genesis step 2: Initialize Aptos coin. -
- - - -## Function `initialize_governed_gas_pool` - - - -
fun initialize_governed_gas_pool(aptos_framework: &signer, delegation_pool_creation_seed: vector<u8>)
-
- - - -
-Implementation - - -
fun initialize_governed_gas_pool(
-    aptos_framework: &signer,
-    delegation_pool_creation_seed: vector<u8>,
-) {
-    governed_gas_pool::initialize(aptos_framework, delegation_pool_creation_seed);
-}
-
- - -
@@ -469,6 +436,7 @@ Only called for testnets and e2e tests. transaction_fee::store_aptos_coin_burn_cap(aptos_framework, burn_cap); // Give transaction_fee module MintCapability<AptosCoin> so it can mint refunds. transaction_fee::store_aptos_coin_mint_cap(aptos_framework, mint_cap); + let core_resources = account::create_account(@core_resources); account::rotate_authentication_key_internal(&core_resources, core_resources_auth_key); aptos_account::register_apt(&core_resources); // registers APT store @@ -536,14 +504,17 @@ If it exists, it just returns the signer.
fun create_account(aptos_framework: &signer, account_address: address, balance: u64): signer {
-    if (account::exists_at(account_address)) {
+    let account = if (account::exists_at(account_address)) {
         create_signer(account_address)
     } else {
-        let account = account::create_account(account_address);
+        account::create_account(account_address)
+    };
+
+    if (coin::balance<AptosCoin>(account_address) == 0) {
         coin::register<AptosCoin>(&account);
         aptos_coin::mint(aptos_framework, account_address, balance);
-        account
-    }
+    };
+    account
 }
 
@@ -634,6 +605,8 @@ If it exists, it just returns the signer. }; let validator = &employee_group.validator.validator_config; + // These checks ensure that validator accounts have 0x1::Account resource. + // So, validator accounts can't be stateless. assert!( account::exists_at(validator.owner_address), error::not_found(EACCOUNT_DOES_NOT_EXIST), @@ -856,80 +829,6 @@ The last step of genesis. - - - - -## Function `initialize_for_verification` - - - -
#[verify_only]
-fun initialize_for_verification(gas_schedule: vector<u8>, chain_id: u8, initial_version: u64, consensus_config: vector<u8>, execution_config: vector<u8>, epoch_interval_microsecs: u64, minimum_stake: u64, maximum_stake: u64, recurring_lockup_duration_secs: u64, allow_validator_set_change: bool, rewards_rate: u64, rewards_rate_denominator: u64, voting_power_increase_limit: u64, aptos_framework: &signer, min_voting_threshold: u128, required_proposer_stake: u64, voting_duration_secs: u64, accounts: vector<genesis::AccountMap>, employee_vesting_start: u64, employee_vesting_period_duration: u64, employees: vector<genesis::EmployeeAccountMap>, validators: vector<genesis::ValidatorConfigurationWithCommission>)
-
- - - -
-Implementation - - -
fun initialize_for_verification(
-    gas_schedule: vector<u8>,
-    chain_id: u8,
-    initial_version: u64,
-    consensus_config: vector<u8>,
-    execution_config: vector<u8>,
-    epoch_interval_microsecs: u64,
-    minimum_stake: u64,
-    maximum_stake: u64,
-    recurring_lockup_duration_secs: u64,
-    allow_validator_set_change: bool,
-    rewards_rate: u64,
-    rewards_rate_denominator: u64,
-    voting_power_increase_limit: u64,
-    aptos_framework: &signer,
-    min_voting_threshold: u128,
-    required_proposer_stake: u64,
-    voting_duration_secs: u64,
-    accounts: vector<AccountMap>,
-    employee_vesting_start: u64,
-    employee_vesting_period_duration: u64,
-    employees: vector<EmployeeAccountMap>,
-    validators: vector<ValidatorConfigurationWithCommission>
-) {
-    initialize(
-        gas_schedule,
-        chain_id,
-        initial_version,
-        consensus_config,
-        execution_config,
-        epoch_interval_microsecs,
-        minimum_stake,
-        maximum_stake,
-        recurring_lockup_duration_secs,
-        allow_validator_set_change,
-        rewards_rate,
-        rewards_rate_denominator,
-        voting_power_increase_limit
-    );
-    features::change_feature_flags_for_verification(aptos_framework, vector[1, 2], vector[]);
-    initialize_aptos_coin(aptos_framework);
-    aptos_governance::initialize_for_verification(
-        aptos_framework,
-        min_voting_threshold,
-        required_proposer_stake,
-        voting_duration_secs
-    );
-    create_accounts(aptos_framework, accounts);
-    create_employee_validators(employee_vesting_start, employee_vesting_period_duration, employees);
-    create_initialize_validators_with_commission(aptos_framework, true, validators);
-    set_genesis_end(aptos_framework);
-}
-
- - -
@@ -1131,7 +1030,24 @@ The last step of genesis. -
include stake::ResourceRequirement;
+
pragma verify_duration_estimate = 120;
+include stake::ResourceRequirement;
+
+ + + + + +### Function `initialize_validator` + + +
fun initialize_validator(pool_address: address, validator: &genesis::ValidatorConfiguration)
+
+ + + + +
pragma verify_duration_estimate = 120;
 
@@ -1169,10 +1085,8 @@ The last step of genesis. requires chain_status::is_operating(); requires len(execution_config) > 0; requires exists<staking_config::StakingRewardsConfig>(@aptos_framework); - requires exists<stake::ValidatorFees>(@aptos_framework); requires exists<coin::CoinInfo<AptosCoin>>(@aptos_framework); include CompareTimeRequires; - include transaction_fee::RequiresCollectedFeesPerValueLeqBlockAptosSupply; }
@@ -1189,22 +1103,4 @@ The last step of genesis.
- - - -### Function `initialize_for_verification` - - -
#[verify_only]
-fun initialize_for_verification(gas_schedule: vector<u8>, chain_id: u8, initial_version: u64, consensus_config: vector<u8>, execution_config: vector<u8>, epoch_interval_microsecs: u64, minimum_stake: u64, maximum_stake: u64, recurring_lockup_duration_secs: u64, allow_validator_set_change: bool, rewards_rate: u64, rewards_rate_denominator: u64, voting_power_increase_limit: u64, aptos_framework: &signer, min_voting_threshold: u128, required_proposer_stake: u64, voting_duration_secs: u64, accounts: vector<genesis::AccountMap>, employee_vesting_start: u64, employee_vesting_period_duration: u64, employees: vector<genesis::EmployeeAccountMap>, validators: vector<genesis::ValidatorConfigurationWithCommission>)
-
- - - - -
pragma verify_duration_estimate = 120;
-include InitalizeRequires;
-
- - [move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/aptos-framework/doc/governed_gas_pool.md b/aptos-move/framework/aptos-framework/doc/governed_gas_pool.md deleted file mode 100644 index 1420628935966..0000000000000 --- a/aptos-move/framework/aptos-framework/doc/governed_gas_pool.md +++ /dev/null @@ -1,568 +0,0 @@ - - - -# Module `0x1::governed_gas_pool` - - - -- [Resource `GovernedGasPool`](#0x1_governed_gas_pool_GovernedGasPool) -- [Constants](#@Constants_0) -- [Function `primary_fungible_store_address`](#0x1_governed_gas_pool_primary_fungible_store_address) -- [Function `create_resource_account_seed`](#0x1_governed_gas_pool_create_resource_account_seed) -- [Function `initialize`](#0x1_governed_gas_pool_initialize) -- [Function `init_module`](#0x1_governed_gas_pool_init_module) -- [Function `governed_gas_signer`](#0x1_governed_gas_pool_governed_gas_signer) -- [Function `governed_gas_pool_address`](#0x1_governed_gas_pool_governed_gas_pool_address) -- [Function `fund`](#0x1_governed_gas_pool_fund) -- [Function `deposit`](#0x1_governed_gas_pool_deposit) -- [Function `deposit_from`](#0x1_governed_gas_pool_deposit_from) -- [Function `deposit_from_fungible_store`](#0x1_governed_gas_pool_deposit_from_fungible_store) -- [Function `deposit_gas_fee`](#0x1_governed_gas_pool_deposit_gas_fee) -- [Function `deposit_gas_fee_v2`](#0x1_governed_gas_pool_deposit_gas_fee_v2) -- [Function `get_balance`](#0x1_governed_gas_pool_get_balance) -- [Specification](#@Specification_1) - - [Function `initialize`](#@Specification_1_initialize) - - [Function `fund`](#@Specification_1_fund) - - [Function `deposit`](#@Specification_1_deposit) - - [Function `deposit_gas_fee`](#@Specification_1_deposit_gas_fee) - - -
use 0x1::account;
-use 0x1::aptos_account;
-use 0x1::aptos_coin;
-use 0x1::coin;
-use 0x1::features;
-use 0x1::fungible_asset;
-use 0x1::object;
-use 0x1::signer;
-use 0x1::system_addresses;
-use 0x1::vector;
-
- - - - - -## Resource `GovernedGasPool` - -The Governed Gas Pool -Internally, this is a simply wrapper around a resource account. - - -
struct GovernedGasPool has key
-
- - - -
-Fields - - -
-
-signer_capability: account::SignerCapability -
-
- The signer capability of the resource account. -
-
- - -
- - - -## Constants - - - - - - -
const MODULE_SALT: vector<u8> = [97, 112, 116, 111, 115, 95, 102, 114, 97, 109, 101, 119, 111, 114, 107, 58, 58, 103, 111, 118, 101, 114, 110, 101, 100, 95, 103, 97, 115, 95, 112, 111, 111, 108];
-
- - - - - -## Function `primary_fungible_store_address` - -Address of APT Primary Fungible Store - - -
fun primary_fungible_store_address(account: address): address
-
- - - -
-Implementation - - -
inline fun primary_fungible_store_address(account: address): address {
-    object::create_user_derived_object_address(account, @aptos_fungible_asset)
-}
-
- - - -
- - - -## Function `create_resource_account_seed` - -Create the seed to derive the resource account address. - - -
fun create_resource_account_seed(delegation_pool_creation_seed: vector<u8>): vector<u8>
-
- - - -
-Implementation - - -
fun create_resource_account_seed(
-    delegation_pool_creation_seed: vector<u8>,
-): vector<u8> {
-    let seed = vector::empty<u8>();
-    // include module salt (before any subseeds) to avoid conflicts with other modules creating resource accounts
-    vector::append(&mut seed, MODULE_SALT);
-    // include an additional salt in case the same resource account has already been created
-    vector::append(&mut seed, delegation_pool_creation_seed);
-    seed
-}
-
- - - -
- - - -## Function `initialize` - -Initializes the governed gas pool around a resource account creation seed. -@param aptos_framework The signer of the aptos_framework module. -@param delegation_pool_creation_seed The seed to be used to create the resource account hosting the delegation pool. - - -
public fun initialize(aptos_framework: &signer, delegation_pool_creation_seed: vector<u8>)
-
- - - -
-Implementation - - -
public fun initialize(
-    aptos_framework: &signer,
-    delegation_pool_creation_seed: vector<u8>,
-) {
-    system_addresses::assert_aptos_framework(aptos_framework);
-
-    // return if the governed gas pool has already been initialized
-    if (exists<GovernedGasPool>(signer::address_of(aptos_framework))) {
-        return
-    };
-
-    // generate a seed to be used to create the resource account hosting the delegation pool
-    let seed = create_resource_account_seed(delegation_pool_creation_seed);
-
-    let (governed_gas_pool_signer, governed_gas_pool_signer_cap) = account::create_resource_account(aptos_framework, seed);
-
-    // register apt
-    aptos_account::register_apt(&governed_gas_pool_signer);
-
-    move_to(aptos_framework, GovernedGasPool{
-        signer_capability: governed_gas_pool_signer_cap,
-    });
-}
-
- - - -
- - - -## Function `init_module` - -Initialize the governed gas pool as a module -@param aptos_framework The signer of the aptos_framework module. - - -
fun init_module(aptos_framework: &signer)
-
- - - -
-Implementation - - -
fun init_module(aptos_framework: &signer) {
-    // Initialize the governed gas pool
-    let seed : vector<u8> = b"aptos_framework::governed_gas_pool";
-    initialize(aptos_framework, seed);
-}
-
- - - -
- - - -## Function `governed_gas_signer` - -Borrows the signer of the governed gas pool. -@return The signer of the governed gas pool. - - -
fun governed_gas_signer(): signer
-
- - - -
-Implementation - - -
fun governed_gas_signer(): signer acquires GovernedGasPool {
-    let signer_cap = &borrow_global<GovernedGasPool>(@aptos_framework).signer_capability;
-    create_signer_with_capability(signer_cap)
-}
-
- - - -
- - - -## Function `governed_gas_pool_address` - -Gets the address of the governed gas pool. -@return The address of the governed gas pool. - - -
#[view]
-public fun governed_gas_pool_address(): address
-
- - - -
-Implementation - - -
public fun governed_gas_pool_address(): address acquires GovernedGasPool {
-    signer::address_of(&governed_gas_signer())
-}
-
- - - -
- - - -## Function `fund` - -Funds the destination account with a given amount of coin. -@param account The account to be funded. -@param amount The amount of coin to be funded. - - -
public fun fund<CoinType>(aptos_framework: &signer, account: address, amount: u64)
-
- - - -
-Implementation - - -
public fun fund<CoinType>(aptos_framework: &signer, account: address, amount: u64) acquires GovernedGasPool {
-    // Check that the Aptos framework is the caller
-    // This is what ensures that funding can only be done by the Aptos framework,
-    // i.e., via a governance proposal.
-    system_addresses::assert_aptos_framework(aptos_framework);
-    let governed_gas_signer = &governed_gas_signer();
-    coin::deposit(account, coin::withdraw<CoinType>(governed_gas_signer, amount));
-}
-
- - - -
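A minimal governance-script sketch of how fund could be invoked, assuming the framework signer is obtained by resolving an approved proposal via aptos_governance::resolve; the recipient address and amount are placeholders:

```move
script {
    use aptos_framework::aptos_coin::AptosCoin;
    use aptos_framework::aptos_governance;
    use aptos_framework::governed_gas_pool;

    fun main(proposal_id: u64) {
        // Only the framework signer, produced by resolving an approved proposal, may fund accounts.
        let framework_signer = aptos_governance::resolve(proposal_id, @aptos_framework);
        // Pay out 1_000_000 octas from the pool to a placeholder recipient.
        governed_gas_pool::fund<AptosCoin>(&framework_signer, @0xA11CE, 1_000_000);
    }
}
```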
- - - -## Function `deposit` - -Deposits some coin into the governed gas pool. -@param coin The coin to be deposited. - - -
fun deposit<CoinType>(coin: coin::Coin<CoinType>)
-
- - - -
-Implementation - - -
fun deposit<CoinType>(coin: Coin<CoinType>) acquires GovernedGasPool {
-    let governed_gas_pool_address = governed_gas_pool_address();
-    coin::deposit(governed_gas_pool_address, coin);
-}
-
- - - -
- - - -## Function `deposit_from` - -Deposits some coin from an account to the governed gas pool. -@param account The account from which the coin is to be deposited. -@param amount The amount of coin to be deposited. - - -
fun deposit_from<CoinType>(account: address, amount: u64)
-
- - - -
-Implementation - - -
fun deposit_from<CoinType>(account: address, amount: u64) acquires GovernedGasPool {
-   deposit(coin::withdraw_from<CoinType>(account, amount));
-}
-
- - - -
- - - -## Function `deposit_from_fungible_store` - -Deposits some FA from the fungible store. -@param account The account from which the FA is to be deposited. -@param amount The amount of FA to be deposited. - - -
fun deposit_from_fungible_store(account: address, amount: u64)
-
- - - -
-Implementation - - -
fun deposit_from_fungible_store(account: address, amount: u64) acquires GovernedGasPool {
-    if (amount > 0){
-        // compute the governed gas pool store address
-        let governed_gas_pool_address = governed_gas_pool_address();
-        let governed_gas_pool_store_address = primary_fungible_store_address(governed_gas_pool_address);
-
-        // compute the account store address
-        let account_store_address = primary_fungible_store_address(account);
-        fungible_asset::deposit_internal(
-            governed_gas_pool_store_address,
-            fungible_asset::withdraw_internal(
-                account_store_address,
-                amount
-            )
-        );
-    }
-}
-
- - - -
- - - -## Function `deposit_gas_fee` - -Deposits gas fees into the governed gas pool. -@param gas_payer The address of the account that paid the gas fees. -@param gas_fee The amount of gas fees to be deposited. - - -
public fun deposit_gas_fee(_gas_payer: address, _gas_fee: u64)
-
- - - -
-Implementation - - -
public fun deposit_gas_fee(_gas_payer: address, _gas_fee: u64) acquires GovernedGasPool {
-    // get the sender to preserve the signature but do nothing
-    governed_gas_pool_address();
-}
-
- - - -
- - - -## Function `deposit_gas_fee_v2` - -Deposits gas fees into the governed gas pool. -@param gas_payer The address of the account that paid the gas fees. -@param gas_fee The amount of gas fees to be deposited. - - -
public(friend) fun deposit_gas_fee_v2(gas_payer: address, gas_fee: u64)
-
- - - -
-Implementation - - -
public(friend) fun deposit_gas_fee_v2(gas_payer: address, gas_fee: u64) acquires GovernedGasPool {
-   if (features::operations_default_to_fa_apt_store_enabled()) {
-        deposit_from_fungible_store(gas_payer, gas_fee);
-    } else {
-        deposit_from<AptosCoin>(gas_payer, gas_fee);
-    };
-}
-
- - - -
- - - -## Function `get_balance` - -Gets the balance of a specified coin type in the governed gas pool. -@return The balance of the coin in the pool. - - -
#[view]
-public fun get_balance<CoinType>(): u64
-
- - - -
-Implementation - - -
public fun get_balance<CoinType>(): u64 acquires GovernedGasPool {
-    let pool_address = governed_gas_pool_address();
-    coin::balance<CoinType>(pool_address)
-}
-
- - - -
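A minimal sketch of reading the pool balance from another module through this view function; the wrapper module name and address are hypothetical:

```move
module example_addr::pool_reader {
    use aptos_framework::aptos_coin::AptosCoin;
    use aptos_framework::governed_gas_pool;

    // Returns the amount of APT (in octas) currently held by the governed gas pool.
    #[view]
    public fun pool_apt_balance(): u64 {
        governed_gas_pool::get_balance<AptosCoin>()
    }
}
```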
- - - -## Specification - - - -
// This enforces high-level requirement 1:
-invariant exists<GovernedGasPool>(@aptos_framework);
-
- - - - - -### Function `initialize` - - -
public fun initialize(aptos_framework: &signer, delegation_pool_creation_seed: vector<u8>)
-
- - - - -
requires system_addresses::is_aptos_framework_address(signer::address_of(aptos_framework));
-// This enforces high-level requirement 1:
-ensures exists<GovernedGasPool>(@aptos_framework);
-
- - - - - -### Function `fund` - - -
public fun fund<CoinType>(aptos_framework: &signer, account: address, amount: u64)
-
- - - - -
pragma aborts_if_is_partial = true;
-// This enforces high-level requirement 4:
-aborts_if !system_addresses::is_aptos_framework_address(signer::address_of(aptos_framework));
-
- - -Abort if the governed gas pool has insufficient funds - - -
aborts_with coin::EINSUFFICIENT_BALANCE, error::invalid_argument(EINSUFFICIENT_BALANCE), 0x1, 0x5, 0x7;
-
- - - - - -### Function `deposit` - - -
fun deposit<CoinType>(coin: coin::Coin<CoinType>)
-
- - - - -
pragma aborts_if_is_partial = true;
-
- - - - - -### Function `deposit_gas_fee` - - -
public fun deposit_gas_fee(_gas_payer: address, _gas_fee: u64)
-
- - -[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/aptos-framework/doc/jwk_consensus_config.md b/aptos-move/framework/aptos-framework/doc/jwk_consensus_config.md index b2f0f012886d9..4c3902ff3a304 100644 --- a/aptos-move/framework/aptos-framework/doc/jwk_consensus_config.md +++ b/aptos-move/framework/aptos-framework/doc/jwk_consensus_config.md @@ -249,7 +249,7 @@ Only used in reconfigurations to apply the pending on_new_epoch(framework: &signer) acquires JWKConsensusConfig { system_addresses::assert_aptos_framework(framework); if (config_buffer::does_exist<JWKConsensusConfig>()) { - let new_config = config_buffer::extract<JWKConsensusConfig>(); + let new_config = config_buffer::extract_v2<JWKConsensusConfig>(); if (exists<JWKConsensusConfig>(@aptos_framework)) { *borrow_global_mut<JWKConsensusConfig>(@aptos_framework) = new_config; } else { diff --git a/aptos-move/framework/aptos-framework/doc/jwks.md b/aptos-move/framework/aptos-framework/doc/jwks.md index e0c09f7fa7214..a505aea47875f 100644 --- a/aptos-move/framework/aptos-framework/doc/jwks.md +++ b/aptos-move/framework/aptos-framework/doc/jwks.md @@ -27,7 +27,10 @@ have a simple layout which is easily accessible in Rust. - [Struct `PatchUpsertJWK`](#0x1_jwks_PatchUpsertJWK) - [Resource `Patches`](#0x1_jwks_Patches) - [Resource `PatchedJWKs`](#0x1_jwks_PatchedJWKs) +- [Resource `FederatedJWKs`](#0x1_jwks_FederatedJWKs) - [Constants](#@Constants_0) +- [Function `patch_federated_jwks`](#0x1_jwks_patch_federated_jwks) +- [Function `update_federated_jwk_set`](#0x1_jwks_update_federated_jwk_set) - [Function `get_patched_jwk`](#0x1_jwks_get_patched_jwk) - [Function `try_get_patched_jwk`](#0x1_jwks_try_get_patched_jwk) - [Function `upsert_oidc_provider`](#0x1_jwks_upsert_oidc_provider) @@ -59,14 +62,17 @@ have a simple layout which is easily accessible in Rust. - [Function `on_new_epoch`](#@Specification_1_on_new_epoch) -
use 0x1::chain_status;
+
use 0x1::bcs;
+use 0x1::chain_status;
 use 0x1::comparator;
 use 0x1::config_buffer;
 use 0x1::copyable_any;
 use 0x1::error;
 use 0x1::event;
+use 0x1::features;
 use 0x1::option;
 use 0x1::reconfiguration;
+use 0x1::signer;
 use 0x1::string;
 use 0x1::system_addresses;
 use 0x1::vector;
@@ -589,6 +595,34 @@ This is what applications should consume.
 
 
 
+
+Fields + + +
+
+jwks: jwks::AllProvidersJWKs +
+
+ +
+
+ + +
+ + + +## Resource `FederatedJWKs` + +JWKs for federated keyless accounts are stored in this resource. + + +
struct FederatedJWKs has drop, key
+
+ + +
Fields @@ -610,6 +644,42 @@ This is what applications should consume. ## Constants + + + + +
const DELETE_COMMAND_INDICATOR: vector<u8> = [84, 72, 73, 83, 95, 73, 83, 95, 65, 95, 68, 69, 76, 69, 84, 69, 95, 67, 79, 77, 77, 65, 78, 68];
+
+ + + + + + + +
const EFEDERATED_JWKS_TOO_LARGE: u64 = 8;
+
+ + + + + + + +
const EINSTALL_FEDERATED_JWKS_AT_APTOS_FRAMEWORK: u64 = 7;
+
+ + + + + + + +
const EINVALID_FEDERATED_JWK_SET: u64 = 9;
+
+ + + @@ -709,6 +779,156 @@ This is what applications should consume. + + +We limit the size of a PatchedJWKs resource installed by a dapp owner for federated keyless accounts. +Note: If too large, validators waste work reading it for invalid TXN signatures. + + +
const MAX_FEDERATED_JWKS_SIZE_BYTES: u64 = 2048;
+
+ + + + + +## Function `patch_federated_jwks` + +Called by a federated keyless dapp owner to install the JWKs for the federated OIDC provider (e.g., Auth0, AWS +Cognito, etc). For type-safety, we explicitly use a struct FederatedJWKs { jwks: AllProviderJWKs } instead of +reusing PatchedJWKs { jwks: AllProviderJWKs }, which is a JWK-consensus-specific struct. + + +
public fun patch_federated_jwks(jwk_owner: &signer, patches: vector<jwks::Patch>)
+
+ + + +
+Implementation + + +
public fun patch_federated_jwks(jwk_owner: &signer, patches: vector<Patch>) acquires FederatedJWKs {
+    // Prevents accidental calls in 0x1::jwks that install federated JWKs at the Aptos framework address.
+    assert!(!system_addresses::is_aptos_framework_address(signer::address_of(jwk_owner)),
+        error::invalid_argument(EINSTALL_FEDERATED_JWKS_AT_APTOS_FRAMEWORK)
+    );
+
+    let jwk_addr = signer::address_of(jwk_owner);
+    if (!exists<FederatedJWKs>(jwk_addr)) {
+        move_to(jwk_owner, FederatedJWKs { jwks: AllProvidersJWKs { entries: vector[] } });
+    };
+
+    let fed_jwks = borrow_global_mut<FederatedJWKs>(jwk_addr);
+    vector::for_each_ref(&patches, |obj|{
+        let patch: &Patch = obj;
+        apply_patch(&mut fed_jwks.jwks, *patch);
+    });
+
+    // TODO: Can we check the size more efficiently instead of serializing it via BCS?
+    let num_bytes = vector::length(&bcs::to_bytes(fed_jwks));
+    assert!(num_bytes < MAX_FEDERATED_JWKS_SIZE_BYTES, error::invalid_argument(EFEDERATED_JWKS_TOO_LARGE));
+}
+
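A minimal sketch of how a dapp owner could build patches with this module's public constructors (new_patch_remove_all, new_rsa_jwk, new_patch_upsert_jwk) and install them; the module name and the RSA modulus are placeholders, and real key material is shown in the update_federated_jwk_set example below:

```move
module example_addr::federated_jwk_setup {
    use std::string::utf8;
    use aptos_framework::jwks;

    public entry fun install_issuer_jwk(jwk_owner: &signer) {
        // Build one RSA JWK; the modulus below is a placeholder, not a real key.
        let jwk = jwks::new_rsa_jwk(
            utf8(b"d7b939771a7800c413f90051012d975981916d71"), // kid
            utf8(b"RS256"),                                     // alg
            utf8(b"AQAB"),                                      // e
            utf8(b"<base64url-encoded RSA modulus>")            // n (placeholder)
        );
        // Replace whatever was installed before, then upsert the new key for the issuer.
        let patches = vector[
            jwks::new_patch_remove_all(),
            jwks::new_patch_upsert_jwk(b"https://accounts.google.com", jwk)
        ];
        jwks::patch_federated_jwks(jwk_owner, patches);
    }
}
```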
+ + + +
+ + + +## Function `update_federated_jwk_set` + +This can be called to install or update a set of JWKs for a federated OIDC provider. This function should +be invoked to intially install a set of JWKs or to update a set of JWKs when a keypair is rotated. + +The iss parameter is the value of the iss claim on the JWTs that are to be verified by the JWK set. +kid_vec, alg_vec, e_vec, n_vec are String vectors of the JWK attributes kid, alg, e and n respectively. +See https://datatracker.ietf.org/doc/html/rfc7517#section-4 for more details about the JWK attributes aforementioned. + +For the example JWK set snapshot below containing 2 keys for Google found at https://www.googleapis.com/oauth2/v3/certs - +```json +{ +"keys": [ +{ +"alg": "RS256", +"use": "sig", +"kty": "RSA", +"n": "wNHgGSG5B5xOEQNFPW2p_6ZxZbfPoAU5VceBUuNwQWLop0ohW0vpoZLU1tAsq_S9s5iwy27rJw4EZAOGBR9oTRq1Y6Li5pDVJfmzyRNtmWCWndR-bPqhs_dkJU7MbGwcvfLsN9FSHESFrS9sfGtUX-lZfLoGux23TKdYV9EE-H-NDASxrVFUk2GWc3rL6UEMWrMnOqV9-tghybDU3fcRdNTDuXUr9qDYmhmNegYjYu4REGjqeSyIG1tuQxYpOBH-tohtcfGY-oRTS09kgsSS9Q5BRM4qqCkGP28WhlSf4ui0-norS0gKMMI1P_ZAGEsLn9p2TlYMpewvIuhjJs1thw", +"kid": "d7b939771a7800c413f90051012d975981916d71", +"e": "AQAB" +}, +{ +"kty": "RSA", +"kid": "b2620d5e7f132b52afe8875cdf3776c064249d04", +"alg": "RS256", +"n": "pi22xDdK2fz5gclIbDIGghLDYiRO56eW2GUcboeVlhbAuhuT5mlEYIevkxdPOg5n6qICePZiQSxkwcYMIZyLkZhSJ2d2M6Szx2gDtnAmee6o_tWdroKu0DjqwG8pZU693oLaIjLku3IK20lTs6-2TeH-pUYMjEqiFMhn-hb7wnvH_FuPTjgz9i0rEdw_Hf3Wk6CMypaUHi31y6twrMWq1jEbdQNl50EwH-RQmQ9bs3Wm9V9t-2-_Jzg3AT0Ny4zEDU7WXgN2DevM8_FVje4IgztNy29XUkeUctHsr-431_Iu23JIy6U4Kxn36X3RlVUKEkOMpkDD3kd81JPW4Ger_w", +"e": "AQAB", +"use": "sig" +} +] +} +``` + +We can call update_federated_jwk_set for Google's iss - "https://accounts.google.com" and for each vector +argument kid_vec, alg_vec, e_vec, n_vec, we set in index 0 the corresponding attribute in the first JWK and we set in index 1 +the corresponding attribute in the second JWK as shown below. + +```move +use std::string::utf8; +aptos_framework::jwks::update_federated_jwk_set( +jwk_owner, +b"https://accounts.google.com", +vector[utf8(b"d7b939771a7800c413f90051012d975981916d71"), utf8(b"b2620d5e7f132b52afe8875cdf3776c064249d04")], +vector[utf8(b"RS256"), utf8(b"RS256")], +vector[utf8(b"AQAB"), utf8(b"AQAB")], +vector[ +utf8(b"wNHgGSG5B5xOEQNFPW2p_6ZxZbfPoAU5VceBUuNwQWLop0ohW0vpoZLU1tAsq_S9s5iwy27rJw4EZAOGBR9oTRq1Y6Li5pDVJfmzyRNtmWCWndR-bPqhs_dkJU7MbGwcvfLsN9FSHESFrS9sfGtUX-lZfLoGux23TKdYV9EE-H-NDASxrVFUk2GWc3rL6UEMWrMnOqV9-tghybDU3fcRdNTDuXUr9qDYmhmNegYjYu4REGjqeSyIG1tuQxYpOBH-tohtcfGY-oRTS09kgsSS9Q5BRM4qqCkGP28WhlSf4ui0-norS0gKMMI1P_ZAGEsLn9p2TlYMpewvIuhjJs1thw"), +utf8(b"pi22xDdK2fz5gclIbDIGghLDYiRO56eW2GUcboeVlhbAuhuT5mlEYIevkxdPOg5n6qICePZiQSxkwcYMIZyLkZhSJ2d2M6Szx2gDtnAmee6o_tWdroKu0DjqwG8pZU693oLaIjLku3IK20lTs6-2TeH-pUYMjEqiFMhn-hb7wnvH_FuPTjgz9i0rEdw_Hf3Wk6CMypaUHi31y6twrMWq1jEbdQNl50EwH-RQmQ9bs3Wm9V9t-2-_Jzg3AT0Ny4zEDU7WXgN2DevM8_FVje4IgztNy29XUkeUctHsr-431_Iu23JIy6U4Kxn36X3RlVUKEkOMpkDD3kd81JPW4Ger_w") +] +) +``` + +See AIP-96 for more details about federated keyless - https://github.com/aptos-foundation/AIPs/blob/main/aips/aip-96.md + +NOTE: Currently only RSA keys are supported. + + +
public entry fun update_federated_jwk_set(jwk_owner: &signer, iss: vector<u8>, kid_vec: vector<string::String>, alg_vec: vector<string::String>, e_vec: vector<string::String>, n_vec: vector<string::String>)
+
+ + + +
+Implementation + + +
public entry fun update_federated_jwk_set(jwk_owner: &signer, iss: vector<u8>, kid_vec: vector<String>, alg_vec: vector<String>, e_vec: vector<String>, n_vec: vector<String>) acquires FederatedJWKs {
+    assert!(!vector::is_empty(&kid_vec), error::invalid_argument(EINVALID_FEDERATED_JWK_SET));
+    let num_jwk = vector::length<String>(&kid_vec);
+    assert!(vector::length(&alg_vec) == num_jwk , error::invalid_argument(EINVALID_FEDERATED_JWK_SET));
+    assert!(vector::length(&e_vec) == num_jwk, error::invalid_argument(EINVALID_FEDERATED_JWK_SET));
+    assert!(vector::length(&n_vec) == num_jwk, error::invalid_argument(EINVALID_FEDERATED_JWK_SET));
+
+    let remove_all_patch = new_patch_remove_all();
+    let patches = vector[remove_all_patch];
+    while (!vector::is_empty(&kid_vec)) {
+        let kid = vector::pop_back(&mut kid_vec);
+        let alg = vector::pop_back(&mut alg_vec);
+        let e = vector::pop_back(&mut e_vec);
+        let n = vector::pop_back(&mut n_vec);
+        let jwk = new_rsa_jwk(kid, alg, e, n);
+        let patch = new_patch_upsert_jwk(iss, jwk);
+        vector::push_back(&mut patches, patch)
+    };
+    patch_federated_jwks(jwk_owner, patches);
+}
+
+ + + +
+ ## Function `get_patched_jwk` @@ -826,9 +1046,9 @@ aptos_framework::aptos_governance::reconfigure(&framework_signer); system_addresses::assert_aptos_framework(fx); let provider_set = if (config_buffer::does_exist<SupportedOIDCProviders>()) { - config_buffer::extract<SupportedOIDCProviders>() + config_buffer::extract_v2<SupportedOIDCProviders>() } else { - *borrow_global_mut<SupportedOIDCProviders>(@aptos_framework) + *borrow_global<SupportedOIDCProviders>(@aptos_framework) }; let old_config_url = remove_oidc_provider_internal(&mut provider_set, name); @@ -901,9 +1121,9 @@ aptos_framework::aptos_governance::reconfigure(&framework_signer); system_addresses::assert_aptos_framework(fx); let provider_set = if (config_buffer::does_exist<SupportedOIDCProviders>()) { - config_buffer::extract<SupportedOIDCProviders>() + config_buffer::extract_v2<SupportedOIDCProviders>() } else { - *borrow_global_mut<SupportedOIDCProviders>(@aptos_framework) + *borrow_global<SupportedOIDCProviders>(@aptos_framework) }; let ret = remove_oidc_provider_internal(&mut provider_set, name); config_buffer::upsert(provider_set); @@ -934,7 +1154,7 @@ Only used in reconfigurations to apply the pending on_new_epoch(framework: &signer) acquires SupportedOIDCProviders { system_addresses::assert_aptos_framework(framework); if (config_buffer::does_exist<SupportedOIDCProviders>()) { - let new_config = config_buffer::extract<SupportedOIDCProviders>(); + let new_config = config_buffer::extract_v2<SupportedOIDCProviders>(); if (exists<SupportedOIDCProviders>(@aptos_framework)) { *borrow_global_mut<SupportedOIDCProviders>(@aptos_framework) = new_config; } else { @@ -1230,10 +1450,42 @@ and its version equals to the
public fun upsert_into_observed_jwks(fx: &signer, provider_jwks_vec: vector<ProviderJWKs>) acquires ObservedJWKs, PatchedJWKs, Patches {
     system_addresses::assert_aptos_framework(fx);
     let observed_jwks = borrow_global_mut<ObservedJWKs>(@aptos_framework);
-    vector::for_each(provider_jwks_vec, |obj| {
-        let provider_jwks: ProviderJWKs = obj;
-        upsert_provider_jwks(&mut observed_jwks.jwks, provider_jwks);
-    });
+
+    if (features::is_jwk_consensus_per_key_mode_enabled()) {
+        vector::for_each(provider_jwks_vec, |proposed_provider_jwks|{
+            let maybe_cur_issuer_jwks = remove_issuer(&mut observed_jwks.jwks, proposed_provider_jwks.issuer);
+            let cur_issuer_jwks = if (option::is_some(&maybe_cur_issuer_jwks)) {
+                option::extract(&mut maybe_cur_issuer_jwks)
+            } else {
+                ProviderJWKs {
+                    issuer: proposed_provider_jwks.issuer,
+                    version: 0,
+                    jwks: vector[],
+                }
+            };
+            assert!(cur_issuer_jwks.version + 1 == proposed_provider_jwks.version, error::invalid_argument(EUNEXPECTED_VERSION));
+            vector::for_each(proposed_provider_jwks.jwks, |jwk|{
+                let variant_type_name = *string::bytes(copyable_any::type_name(&jwk.variant));
+                let is_delete = if (variant_type_name == b"0x1::jwks::UnsupportedJWK") {
+                    let repr = copyable_any::unpack<UnsupportedJWK>(jwk.variant);
+                    &repr.payload == &DELETE_COMMAND_INDICATOR
+                } else {
+                    false
+                };
+                if (is_delete) {
+                    remove_jwk(&mut cur_issuer_jwks, get_jwk_id(&jwk));
+                } else {
+                    upsert_jwk(&mut cur_issuer_jwks, jwk);
+                }
+            });
+            cur_issuer_jwks.version = cur_issuer_jwks.version + 1;
+            upsert_provider_jwks(&mut observed_jwks.jwks, cur_issuer_jwks);
+        });
+    } else {
+        vector::for_each(provider_jwks_vec, |provider_jwks| {
+            upsert_provider_jwks(&mut observed_jwks.jwks, provider_jwks);
+        });
+    };
 
     let epoch = reconfiguration::current_epoch();
     emit(ObservedJWKsUpdated { epoch, jwks: observed_jwks.jwks });
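For context on the per-key path above: DELETE_COMMAND_INDICATOR is the ASCII encoding of "THIS_IS_A_DELETE_COMMAND", so a proposed per-key deletion is simply an UnsupportedJWK whose payload equals those bytes; upsert_into_observed_jwks then removes the key with the matching ID instead of upserting it. A minimal sketch using this module's public constructor (the wrapper module is hypothetical):

```move
#[test_only]
module example_addr::jwk_delete_example {
    use aptos_framework::jwks;

    /// Builds the "tombstone" JWK that, when observed under per-key consensus,
    /// causes the key with the given ID to be removed rather than upserted.
    public fun delete_tombstone(kid: vector<u8>): jwks::JWK {
        jwks::new_unsupported_jwk(kid, b"THIS_IS_A_DELETE_COMMAND")
    }
}
```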
@@ -1315,7 +1567,7 @@ Regenerate PatchedJWKs f
 
 ## Function `try_get_jwk_by_issuer`
 
-Get a JWK by issuer and key ID from a AllProvidersJWKs, if it exists.
+Get a JWK by issuer and key ID from an AllProvidersJWKs, if it exists.
 
 
 
fun try_get_jwk_by_issuer(jwks: &jwks::AllProvidersJWKs, issuer: vector<u8>, jwk_id: vector<u8>): option::Option<jwks::JWK>
diff --git a/aptos-move/framework/aptos-framework/doc/keyless_account.md b/aptos-move/framework/aptos-framework/doc/keyless_account.md
index 6a5ec753b84b3..346ea01cded3d 100644
--- a/aptos-move/framework/aptos-framework/doc/keyless_account.md
+++ b/aptos-move/framework/aptos-framework/doc/keyless_account.md
@@ -616,7 +616,7 @@ WARNING: If a malicious key is set, this *could* lead to stolen funds.
     };
 
     let config = if (config_buffer::does_exist<Configuration>()) {
-        config_buffer::extract<Configuration>()
+        config_buffer::extract_v2<Configuration>()
     } else {
         *borrow_global<Configuration>(signer::address_of(fx))
     };
@@ -652,7 +652,7 @@ reconfiguration. Only callable via governance proposal.
     system_addresses::assert_aptos_framework(fx);
 
     let config = if (config_buffer::does_exist<Configuration>()) {
-        config_buffer::extract<Configuration>()
+        config_buffer::extract_v2<Configuration>()
     } else {
         *borrow_global<Configuration>(signer::address_of(fx))
     };
@@ -691,7 +691,7 @@ is no longer possible.
     system_addresses::assert_aptos_framework(fx);
 
     let config = if (config_buffer::does_exist<Configuration>()) {
-        config_buffer::extract<Configuration>()
+        config_buffer::extract_v2<Configuration>()
     } else {
         *borrow_global<Configuration>(signer::address_of(fx))
     };
@@ -710,7 +710,7 @@ is no longer possible.
 
 ## Function `add_override_aud_for_next_epoch`
 
-Convenience method to queue up an append to to the set of override aud's. The change will only be effective
+Convenience method to queue up an append to the set of override aud's. The change will only be effective
 after reconfiguration. Only callable via governance proposal.
 
 WARNING: If a malicious override aud is set, this *could* lead to stolen funds.
@@ -729,7 +729,7 @@ WARNING: If a malicious override aud is set, this *could* lead to s
     system_addresses::assert_aptos_framework(fx);
 
     let config = if (config_buffer::does_exist<Configuration>()) {
-        config_buffer::extract<Configuration>()
+        config_buffer::extract_v2<Configuration>()
     } else {
         *borrow_global<Configuration>(signer::address_of(fx))
     };
@@ -764,7 +764,7 @@ Only used in reconfigurations to apply the queued up configuration changes, if t
     system_addresses::assert_aptos_framework(fx);
 
     if (config_buffer::does_exist<Groth16VerificationKey>()) {
-        let vk = config_buffer::extract();
+        let vk = config_buffer::extract_v2();
         if (exists<Groth16VerificationKey>(@aptos_framework)) {
             *borrow_global_mut<Groth16VerificationKey>(@aptos_framework) = vk;
         } else {
@@ -773,7 +773,7 @@ Only used in reconfigurations to apply the queued up configuration changes, if t
     };
 
     if (config_buffer::does_exist<Configuration>()) {
-        let config = config_buffer::extract();
+        let config = config_buffer::extract_v2();
         if (exists<Configuration>(@aptos_framework)) {
             *borrow_global_mut<Configuration>(@aptos_framework) = config;
         } else {
diff --git a/aptos-move/framework/aptos-framework/doc/managed_coin.md b/aptos-move/framework/aptos-framework/doc/managed_coin.md
index 8ae37e71a1f9c..ede3c87a278f2 100644
--- a/aptos-move/framework/aptos-framework/doc/managed_coin.md
+++ b/aptos-move/framework/aptos-framework/doc/managed_coin.md
@@ -14,6 +14,8 @@ By utilizing this current module, a developer can create his own coin and care l
 -  [Function `initialize`](#0x1_managed_coin_initialize)
 -  [Function `mint`](#0x1_managed_coin_mint)
 -  [Function `register`](#0x1_managed_coin_register)
+-  [Function `destroy_caps`](#0x1_managed_coin_destroy_caps)
+-  [Function `remove_caps`](#0x1_managed_coin_remove_caps)
 -  [Specification](#@Specification_1)
     -  [High-level Requirements](#high-level-req)
     -  [Module-level Specification](#module-level-spec)
@@ -21,6 +23,8 @@ By utilizing this current module, a developer can create his own coin and care l
     -  [Function `initialize`](#@Specification_1_initialize)
     -  [Function `mint`](#@Specification_1_mint)
     -  [Function `register`](#@Specification_1_register)
+    -  [Function `destroy_caps`](#@Specification_1_destroy_caps)
+    -  [Function `remove_caps`](#@Specification_1_remove_caps)
 
 
 
use 0x1::coin;
@@ -231,6 +235,72 @@ Required if user wants to start accepting deposits of CoinType in h
 
 
 
+
+ + + +## Function `destroy_caps` + +Destroys capabilities from the account, so that the user no longer has access to mint or burn. + + +
public entry fun destroy_caps<CoinType>(account: &signer)
+
+ + + +
+Implementation + + +
public entry fun destroy_caps<CoinType>(account: &signer) acquires Capabilities {
+    let (burn_cap, freeze_cap, mint_cap) = remove_caps<CoinType>(account);
+    destroy_burn_cap(burn_cap);
+    destroy_freeze_cap(freeze_cap);
+    destroy_mint_cap(mint_cap);
+}
+
+ + + +
+ + + +## Function `remove_caps` + +Removes capabilities from the account to be stored or destroyed elsewhere + + +
public fun remove_caps<CoinType>(account: &signer): (coin::BurnCapability<CoinType>, coin::FreezeCapability<CoinType>, coin::MintCapability<CoinType>)
+
+ + + +
+Implementation + + +
public fun remove_caps<CoinType>(
+    account: &signer
+): (BurnCapability<CoinType>, FreezeCapability<CoinType>, MintCapability<CoinType>) acquires Capabilities {
+    let account_addr = signer::address_of(account);
+    assert!(
+        exists<Capabilities<CoinType>>(account_addr),
+        error::not_found(ENO_CAPABILITIES),
+    );
+
+    let Capabilities<CoinType> {
+        burn_cap,
+        freeze_cap,
+        mint_cap,
+    } = move_from<Capabilities<CoinType>>(account_addr);
+    (burn_cap, freeze_cap, mint_cap)
+}
+
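A minimal sketch of the intended lifecycle, assuming a coin module published at the creator's own address (the module name, address and coin type are placeholders): initialize the coin, mint and distribute as needed, then call destroy_caps to permanently give up mint, burn and freeze rights.

```move
module example_addr::my_coin {
    use aptos_framework::managed_coin;

    struct MyCoin {}

    /// Must be called by the publisher of this module, since managed_coin::initialize
    /// requires the coin type to be declared at the signer's address.
    public entry fun setup(creator: &signer) {
        managed_coin::initialize<MyCoin>(creator, b"My Coin", b"MC", 8, false);
    }

    /// Once distribution is finished, permanently drop the mint/burn/freeze capabilities.
    public entry fun finalize(creator: &signer) {
        managed_coin::destroy_caps<MyCoin>(creator);
    }
}
```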
+ + +
@@ -308,7 +378,7 @@ Required if user wants to start accepting deposits of CoinType in h
pragma verify = true;
-pragma aborts_if_is_strict;
+pragma aborts_if_is_partial;
 
@@ -423,4 +493,40 @@ Updating Account.guid_creation_num will not overflow.
+ + + +### Function `destroy_caps` + + +
public entry fun destroy_caps<CoinType>(account: &signer)
+
+ + + + +
let account_addr = signer::address_of(account);
+aborts_if !exists<Capabilities<CoinType>>(account_addr);
+ensures !exists<Capabilities<CoinType>>(account_addr);
+
+ + + + + +### Function `remove_caps` + + +
public fun remove_caps<CoinType>(account: &signer): (coin::BurnCapability<CoinType>, coin::FreezeCapability<CoinType>, coin::MintCapability<CoinType>)
+
+ + + + +
let account_addr = signer::address_of(account);
+aborts_if !exists<Capabilities<CoinType>>(account_addr);
+ensures !exists<Capabilities<CoinType>>(account_addr);
+
+ + [move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/aptos-framework/doc/multisig_account.md b/aptos-move/framework/aptos-framework/doc/multisig_account.md index a5fcc8ab9b835..e3e5341ed7cbd 100644 --- a/aptos-move/framework/aptos-framework/doc/multisig_account.md +++ b/aptos-move/framework/aptos-framework/doc/multisig_account.md @@ -82,7 +82,9 @@ and implement the governance voting logic on top. - [Function `next_sequence_number`](#0x1_multisig_account_next_sequence_number) - [Function `vote`](#0x1_multisig_account_vote) - [Function `available_transaction_queue_capacity`](#0x1_multisig_account_available_transaction_queue_capacity) +- [Function `create_with_existing_account_call`](#0x1_multisig_account_create_with_existing_account_call) - [Function `create_with_existing_account`](#0x1_multisig_account_create_with_existing_account) +- [Function `create_with_existing_account_and_revoke_auth_key_call`](#0x1_multisig_account_create_with_existing_account_and_revoke_auth_key_call) - [Function `create_with_existing_account_and_revoke_auth_key`](#0x1_multisig_account_create_with_existing_account_and_revoke_auth_key) - [Function `create`](#0x1_multisig_account_create) - [Function `create_with_owners`](#0x1_multisig_account_create_with_owners) @@ -1831,7 +1833,7 @@ Return the id of the last transaction that was executed (successful or failed) o
public fun last_resolved_sequence_number(multisig_account: address): u64 acquires MultisigAccount {
-    let multisig_account_resource = borrow_global_mut<MultisigAccount>(multisig_account);
+    let multisig_account_resource = borrow_global<MultisigAccount>(multisig_account);
     multisig_account_resource.last_executed_sequence_number
 }
 
@@ -1858,7 +1860,7 @@ Return the id of the next transaction created.
public fun next_sequence_number(multisig_account: address): u64 acquires MultisigAccount {
-    let multisig_account_resource = borrow_global_mut<MultisigAccount>(multisig_account);
+    let multisig_account_resource = borrow_global<MultisigAccount>(multisig_account);
     multisig_account_resource.next_sequence_number
 }
 
@@ -1886,7 +1888,7 @@ Return a bool tuple indicating whether an owner has voted and if so, whether the
public fun vote(
     multisig_account: address, sequence_number: u64, owner: address): (bool, bool) acquires MultisigAccount {
-    let multisig_account_resource = borrow_global_mut<MultisigAccount>(multisig_account);
+    let multisig_account_resource = borrow_global<MultisigAccount>(multisig_account);
     assert!(
         sequence_number > 0 && sequence_number < multisig_account_resource.next_sequence_number,
         error::invalid_argument(EINVALID_SEQUENCE_NUMBER),
@@ -1920,7 +1922,7 @@ Return a bool tuple indicating whether an owner has voted and if so, whether the
 
 
 
public fun available_transaction_queue_capacity(multisig_account: address): u64 acquires MultisigAccount {
-    let multisig_account_resource = borrow_global_mut<MultisigAccount>(multisig_account);
+    let multisig_account_resource = borrow_global<MultisigAccount>(multisig_account);
     let num_pending_transactions = multisig_account_resource.next_sequence_number - multisig_account_resource.last_executed_sequence_number - 1;
     if (num_pending_transactions > MAX_PENDING_TRANSACTIONS) {
         0
@@ -1932,6 +1934,50 @@ Return a bool tuple indicating whether an owner has voted and if so, whether the
 
 
 
+
+
+
+
+## Function `create_with_existing_account_call`
+
+Private entry function that creates a new multisig account on top of an existing account.
+
+This offers a migration path for an existing account with any type of auth key.
+
+Note that this does not revoke auth key-based control over the account. Owners should separately rotate the auth
+key after they are fully migrated to the new multisig account. Alternatively, they can call
+create_with_existing_account_and_revoke_auth_key_call instead.
+
+
+
entry fun create_with_existing_account_call(multisig_account: &signer, owners: vector<address>, num_signatures_required: u64, metadata_keys: vector<string::String>, metadata_values: vector<vector<u8>>)
+
+ + + +
+Implementation + + +
entry fun create_with_existing_account_call(
+    multisig_account: &signer,
+    owners: vector<address>,
+    num_signatures_required: u64,
+    metadata_keys: vector<String>,
+    metadata_values: vector<vector<u8>>,
+) acquires MultisigAccount {
+    create_with_owners_internal(
+        multisig_account,
+        owners,
+        num_signatures_required,
+        option::none<SignerCapability>(),
+        metadata_keys,
+        metadata_values,
+    );
+}
+
+ + +
@@ -2002,6 +2048,61 @@ create_with_existing_account_and_revoke_auth_key instead. + + + + +## Function `create_with_existing_account_and_revoke_auth_key_call` + +Private entry function that creates a new multisig account on top of an existing account and immediately rotates +the original auth key to 0x0. + +Note: If the original account is a resource account, this does not revoke all control over it because, if any +SignerCapability of the resource account still exists, it can still be used to generate the signer for the +account. - - -
entry fun create_with_existing_account_and_revoke_auth_key_call(multisig_account: &signer, owners: vector<address>, num_signatures_required: u64, metadata_keys: vector<string::String>, metadata_values: vector<vector<u8>>)
+
+ + + +
+Implementation + + +
entry fun create_with_existing_account_and_revoke_auth_key_call(
+    multisig_account: &signer,
+    owners: vector<address>,
+    num_signatures_required: u64,
+    metadata_keys: vector<String>,
+    metadata_values:vector<vector<u8>>,
+) acquires MultisigAccount {
+    create_with_owners_internal(
+        multisig_account,
+        owners,
+        num_signatures_required,
+        option::none<SignerCapability>(),
+        metadata_keys,
+        metadata_values,
+    );
+
+    // Rotate the account's auth key to 0x0, which effectively revokes control via auth key.
+    let multisig_address = address_of(multisig_account);
+    account::rotate_authentication_key_internal(multisig_account, ZERO_AUTH_KEY);
+    // This also needs to revoke any signer capability or rotation capability that exists for the account to
+    // completely remove all access to the account.
+    if (account::is_signer_capability_offered(multisig_address)) {
+        account::revoke_any_signer_capability(multisig_account);
+    };
+    if (account::is_rotation_capability_offered(multisig_address)) {
+        account::revoke_any_rotation_capability(multisig_account);
+    };
+}
+
+ + +
@@ -2067,7 +2168,7 @@ account. // Rotate the account's auth key to 0x0, which effectively revokes control via auth key. let multisig_address = address_of(multisig_account); account::rotate_authentication_key_internal(multisig_account, ZERO_AUTH_KEY); - // This also needs to revoke any signer capability or rotation capability that exists for the account to + // This also needs to revoke any signer capability or rotation capability that exists for the account to // completely remove all access to the account. if (account::is_signer_capability_offered(multisig_address)) { account::revoke_any_signer_capability(multisig_account); @@ -2647,14 +2748,15 @@ maliciously alter the number of signatures required. new_metadata: multisig_account_resource.metadata, } ) + } else { + emit_event( + &mut multisig_account_resource.metadata_updated_events, + MetadataUpdatedEvent { + old_metadata, + new_metadata: multisig_account_resource.metadata, + } + ); }; - emit_event( - &mut multisig_account_resource.metadata_updated_events, - MetadataUpdatedEvent { - old_metadata, - new_metadata: multisig_account_resource.metadata, - } - ); }; }
@@ -2849,15 +2951,16 @@ will continue to be an accessible entry point. approved, } ); + } else { + emit_event( + &mut multisig_account_resource.vote_events, + VoteEvent { + owner: owner_addr, + sequence_number, + approved, + } + ); }; - emit_event( - &mut multisig_account_resource.vote_events, - VoteEvent { - owner: owner_addr, - sequence_number, - approved, - } - ); }
@@ -2971,15 +3074,16 @@ Remove the next transaction if it has sufficient owner rejections. executor: address_of(owner), } ); + } else { + emit_event( + &mut multisig_account_resource.execute_rejected_transaction_events, + ExecuteRejectedTransactionEvent { + sequence_number, + num_rejections, + executor: owner_addr, + } + ); }; - emit_event( - &mut multisig_account_resource.execute_rejected_transaction_events, - ExecuteRejectedTransactionEvent { - sequence_number, - num_rejections, - executor: owner_addr, - } - ); }
@@ -3125,16 +3229,17 @@ This function is private so no other code can call this beside the VM itself as executor, } ); + } else { + emit_event( + &mut multisig_account_resource.execute_transaction_events, + TransactionExecutionSucceededEvent { + sequence_number: multisig_account_resource.last_executed_sequence_number, + transaction_payload, + num_approvals, + executor, + } + ); }; - emit_event( - &mut multisig_account_resource.execute_transaction_events, - TransactionExecutionSucceededEvent { - sequence_number: multisig_account_resource.last_executed_sequence_number, - transaction_payload, - num_approvals, - executor, - } - ); }
@@ -3178,17 +3283,18 @@ This function is private so no other code can call this beside the VM itself as execution_error, } ); + } else { + emit_event( + &mut multisig_account_resource.transaction_execution_failed_events, + TransactionExecutionFailedEvent { + executor, + sequence_number: multisig_account_resource.last_executed_sequence_number, + transaction_payload, + num_approvals, + execution_error, + } + ); }; - emit_event( - &mut multisig_account_resource.transaction_execution_failed_events, - TransactionExecutionFailedEvent { - executor, - sequence_number: multisig_account_resource.last_executed_sequence_number, - transaction_payload, - num_approvals, - execution_error, - } - ); }
@@ -3228,16 +3334,17 @@ This function is private so no other code can call this beside the VM itself as approved: true, } ); + } else { + emit_event( + &mut multisig_account_resource.vote_events, + VoteEvent { + owner: executor, + sequence_number, + approved: true, + } + ); }; num_approvals = num_approvals + 1; - emit_event( - &mut multisig_account_resource.vote_events, - VoteEvent { - owner: executor, - sequence_number, - approved: true, - } - ); }; num_approvals @@ -3314,11 +3421,12 @@ This function is private so no other code can call this beside the VM itself as emit( CreateTransaction { multisig_account: multisig_account, creator, sequence_number, transaction } ); + } else { + emit_event( + &mut multisig_account_resource.create_transaction_events, + CreateTransactionEvent { creator, sequence_number, transaction }, + ); }; - emit_event( - &mut multisig_account_resource.create_transaction_events, - CreateTransactionEvent { creator, sequence_number, transaction }, - ); }
@@ -3706,11 +3814,12 @@ Add new owners, remove owners to remove, update signatures required. ); if (std::features::module_event_migration_enabled()) { emit(AddOwners { multisig_account: multisig_address, owners_added: new_owners }); + } else { + emit_event( + &mut multisig_account_ref_mut.add_owners_events, + AddOwnersEvent { owners_added: new_owners } + ); }; - emit_event( - &mut multisig_account_ref_mut.add_owners_events, - AddOwnersEvent { owners_added: new_owners } - ); }; // If owners to remove provided, try to remove them. if (vector::length(&owners_to_remove) > 0) { @@ -3732,11 +3841,12 @@ Add new owners, remove owners to remove, update signatures required. emit( RemoveOwners { multisig_account: multisig_address, owners_removed } ); + } else { + emit_event( + &mut multisig_account_ref_mut.remove_owners_events, + RemoveOwnersEvent { owners_removed } + ); }; - emit_event( - &mut multisig_account_ref_mut.remove_owners_events, - RemoveOwnersEvent { owners_removed } - ); } }; // If new signature count provided, try to update count. @@ -3761,14 +3871,15 @@ Add new owners, remove owners to remove, update signatures required. new_num_signatures_required, } ); - }; - emit_event( - &mut multisig_account_ref_mut.update_signature_required_events, - UpdateSignaturesRequiredEvent { - old_num_signatures_required, - new_num_signatures_required, - } - ); + } else { + emit_event( + &mut multisig_account_ref_mut.update_signature_required_events, + UpdateSignaturesRequiredEvent { + old_num_signatures_required, + new_num_signatures_required, + } + ); + } } }; // Verify number of owners. @@ -4055,8 +4166,7 @@ Add new owners, remove owners to remove, update signatures required. -
aborts_if !exists<account::Account>(creator);
-let owner_nonce = global<account::Account>(creator).sequence_number;
+
let owner_nonce = global<account::Account>(creator).sequence_number;
 
diff --git a/aptos-move/framework/aptos-framework/doc/native_bridge.md b/aptos-move/framework/aptos-framework/doc/native_bridge.md deleted file mode 100644 index bc7c86d56ef71..0000000000000 --- a/aptos-move/framework/aptos-framework/doc/native_bridge.md +++ /dev/null @@ -1,2064 +0,0 @@ - - - -# Module `0x1::native_bridge` - - - -- [Struct `BridgeConfigRelayerUpdated`](#0x1_native_bridge_BridgeConfigRelayerUpdated) -- [Struct `BridgeFeeChangedEvent`](#0x1_native_bridge_BridgeFeeChangedEvent) -- [Struct `BridgeInsuranceBudgetDividerChangedEvent`](#0x1_native_bridge_BridgeInsuranceBudgetDividerChangedEvent) -- [Struct `BridgeInsuranceFundChangedEvent`](#0x1_native_bridge_BridgeInsuranceFundChangedEvent) -- [Struct `BridgeTransferInitiatedEvent`](#0x1_native_bridge_BridgeTransferInitiatedEvent) -- [Struct `BridgeTransferCompletedEvent`](#0x1_native_bridge_BridgeTransferCompletedEvent) -- [Resource `BridgeEvents`](#0x1_native_bridge_BridgeEvents) -- [Resource `AptosCoinBurnCapability`](#0x1_native_bridge_AptosCoinBurnCapability) -- [Resource `AptosCoinMintCapability`](#0x1_native_bridge_AptosCoinMintCapability) -- [Resource `AptosFABurnCapabilities`](#0x1_native_bridge_AptosFABurnCapabilities) -- [Resource `AptosFAMintCapabilities`](#0x1_native_bridge_AptosFAMintCapabilities) -- [Resource `Nonce`](#0x1_native_bridge_Nonce) -- [Resource `OutboundRateLimitBudget`](#0x1_native_bridge_OutboundRateLimitBudget) -- [Resource `InboundRateLimitBudget`](#0x1_native_bridge_InboundRateLimitBudget) -- [Resource `SmartTableWrapper`](#0x1_native_bridge_SmartTableWrapper) -- [Struct `OutboundTransfer`](#0x1_native_bridge_OutboundTransfer) -- [Resource `BridgeConfig`](#0x1_native_bridge_BridgeConfig) -- [Constants](#@Constants_0) -- [Function `initialize`](#0x1_native_bridge_initialize) -- [Function `normalize_u64_to_32_bytes`](#0x1_native_bridge_normalize_u64_to_32_bytes) -- [Function `is_inbound_nonce_set`](#0x1_native_bridge_is_inbound_nonce_set) -- [Function `create_details`](#0x1_native_bridge_create_details) -- [Function `add`](#0x1_native_bridge_add) -- [Function `set_bridge_transfer_id_to_inbound_nonce`](#0x1_native_bridge_set_bridge_transfer_id_to_inbound_nonce) -- [Function `assert_valid_bridge_transfer_id`](#0x1_native_bridge_assert_valid_bridge_transfer_id) -- [Function `bridge_transfer_id`](#0x1_native_bridge_bridge_transfer_id) -- [Function `bridge_relayer`](#0x1_native_bridge_bridge_relayer) -- [Function `insurance_fund`](#0x1_native_bridge_insurance_fund) -- [Function `insurance_budget_divider`](#0x1_native_bridge_insurance_budget_divider) -- [Function `bridge_fee`](#0x1_native_bridge_bridge_fee) -- [Function `get_bridge_transfer_details_from_nonce`](#0x1_native_bridge_get_bridge_transfer_details_from_nonce) -- [Function `get_inbound_nonce_from_bridge_transfer_id`](#0x1_native_bridge_get_inbound_nonce_from_bridge_transfer_id) -- [Function `increment_and_get_nonce`](#0x1_native_bridge_increment_and_get_nonce) -- [Function `store_aptos_coin_burn_cap`](#0x1_native_bridge_store_aptos_coin_burn_cap) -- [Function `store_aptos_coin_mint_cap`](#0x1_native_bridge_store_aptos_coin_mint_cap) -- [Function `mint_to`](#0x1_native_bridge_mint_to) -- [Function `mint`](#0x1_native_bridge_mint) -- [Function `mint_internal`](#0x1_native_bridge_mint_internal) -- [Function `burn_from`](#0x1_native_bridge_burn_from) -- [Function `burn`](#0x1_native_bridge_burn) -- [Function `burn_internal`](#0x1_native_bridge_burn_internal) -- [Function `initiate_bridge_transfer`](#0x1_native_bridge_initiate_bridge_transfer) -- 
[Function `complete_bridge_transfer`](#0x1_native_bridge_complete_bridge_transfer) -- [Function `charge_bridge_fee`](#0x1_native_bridge_charge_bridge_fee) -- [Function `update_bridge_relayer`](#0x1_native_bridge_update_bridge_relayer) -- [Function `update_bridge_fee`](#0x1_native_bridge_update_bridge_fee) -- [Function `update_insurance_fund`](#0x1_native_bridge_update_insurance_fund) -- [Function `update_insurance_budget_divider`](#0x1_native_bridge_update_insurance_budget_divider) -- [Function `assert_is_caller_relayer`](#0x1_native_bridge_assert_is_caller_relayer) -- [Function `assert_outbound_rate_limit_budget_not_exceeded`](#0x1_native_bridge_assert_outbound_rate_limit_budget_not_exceeded) -- [Function `assert_inbound_rate_limit_budget_not_exceeded`](#0x1_native_bridge_assert_inbound_rate_limit_budget_not_exceeded) -- [Function `test_normalize_u64_to_32_bytes_helper`](#0x1_native_bridge_test_normalize_u64_to_32_bytes_helper) - - -
use 0x1::account;
-use 0x1::aptos_coin;
-use 0x1::aptos_hash;
-use 0x1::bcs;
-use 0x1::coin;
-use 0x1::ethereum;
-use 0x1::event;
-use 0x1::features;
-use 0x1::fungible_asset;
-use 0x1::signer;
-use 0x1::smart_table;
-use 0x1::system_addresses;
-use 0x1::timestamp;
-use 0x1::vector;
-
- - - - - -## Struct `BridgeConfigRelayerUpdated` - -Event emitted when the bridge relayer is updated. - - -
#[event]
-struct BridgeConfigRelayerUpdated has drop, store
-
- - - -
-Fields - - -
-
-old_relayer: address -
-
- -
-
-new_relayer: address -
-
- -
-
- - -
- - - -## Struct `BridgeFeeChangedEvent` - -An event triggered upon change of the bridge fee - - -
#[event]
-struct BridgeFeeChangedEvent has drop, store
-
- - - -
-Fields - - -
-
-old_bridge_fee: u64 -
-
- -
-
-new_bridge_fee: u64 -
-
- -
-
- - -
- - - -## Struct `BridgeInsuranceBudgetDividerChangedEvent` - -An event triggered upon change of insurance budget divider - - -
#[event]
-struct BridgeInsuranceBudgetDividerChangedEvent has drop, store
-
- - - -
-Fields - - -
-
-old_insurance_budget_divider: u64 -
-
- -
-
-new_insurance_budget_divider: u64 -
-
- -
-
- - -
- - - -## Struct `BridgeInsuranceFundChangedEvent` - -An event triggered upon change of insurance fund - - -
#[event]
-struct BridgeInsuranceFundChangedEvent has drop, store
-
- - - -
-Fields - - -
-
-old_insurance_fund: address -
-
- -
-
-new_insurance_fund: address -
-
- -
-
- - -
- - - -## Struct `BridgeTransferInitiatedEvent` - -An event triggered upon initiating a bridge transfer - - -
#[event]
-struct BridgeTransferInitiatedEvent has drop, store
-
- - - -
-Fields - - -
-
-bridge_transfer_id: vector<u8> -
-
- -
-
-initiator: address -
-
- -
-
-recipient: vector<u8> -
-
- -
-
-amount: u64 -
-
- -
-
-nonce: u64 -
-
- -
-
- - -
- - - -## Struct `BridgeTransferCompletedEvent` - -An event triggered upon completing a bridge transfer - - -
#[event]
-struct BridgeTransferCompletedEvent has drop, store
-
- - - -
-Fields - - -
-
-bridge_transfer_id: vector<u8> -
-
- -
-
-initiator: vector<u8> -
-
- -
-
-recipient: address -
-
- -
-
-amount: u64 -
-
- -
-
-nonce: u64 -
-
- -
-
- - -
- - - -## Resource `BridgeEvents` - -This struct will store the event handles for bridge events. - - -
struct BridgeEvents has store, key
-
- - - -
-Fields - - -
-
-bridge_transfer_initiated_events: event::EventHandle<native_bridge::BridgeTransferInitiatedEvent> -
-
- -
-
-bridge_transfer_completed_events: event::EventHandle<native_bridge::BridgeTransferCompletedEvent> -
-
- -
-
- - -
- - - -## Resource `AptosCoinBurnCapability` - - - -
struct AptosCoinBurnCapability has key
-
- - - -
-Fields - - -
-
-burn_cap: coin::BurnCapability<aptos_coin::AptosCoin> -
-
- -
-
- - -
- - - -## Resource `AptosCoinMintCapability` - - - -
struct AptosCoinMintCapability has key
-
- - - -
-Fields - - -
-
-mint_cap: coin::MintCapability<aptos_coin::AptosCoin> -
-
- -
-
- - -
- - - -## Resource `AptosFABurnCapabilities` - - - -
struct AptosFABurnCapabilities has key
-
- - - -
-Fields - - -
-
-burn_ref: fungible_asset::BurnRef -
-
- -
-
- - -
- - - -## Resource `AptosFAMintCapabilities` - - - -
struct AptosFAMintCapabilities has key
-
- - - -
-Fields - - -
-
-burn_ref: fungible_asset::MintRef -
-
- -
-
- - -
- - - -## Resource `Nonce` - -A nonce to ensure the uniqueness of bridge transfers - - -
struct Nonce has key
-
- - - -
-Fields - - -
-
-value: u64 -
-
- -
-
- - -
- - - -## Resource `OutboundRateLimitBudget` - - - -
struct OutboundRateLimitBudget has store, key
-
- - - -
-Fields - - -
-
-day: smart_table::SmartTable<u64, u64> -
-
- -
-
- - -
- - - -## Resource `InboundRateLimitBudget` - - - -
struct InboundRateLimitBudget has store, key
-
- - - -
-Fields - - -
-
-day: smart_table::SmartTable<u64, u64> -
-
- -
-
- - -
- - - -## Resource `SmartTableWrapper` - -A smart table wrapper - - -
struct SmartTableWrapper<K, V> has store, key
-
- - - -
-Fields - - -
-
-inner: smart_table::SmartTable<K, V> -
-
- -
-
- - -
- - - -## Struct `OutboundTransfer` - -Details on the outbound transfer - - -
struct OutboundTransfer has copy, store
-
- - - -
-Fields - - -
-
-bridge_transfer_id: vector<u8> -
-
- -
-
-initiator: address -
-
- -
-
-recipient: ethereum::EthereumAddress -
-
- -
-
-amount: u64 -
-
- -
-
- - -
- - - -## Resource `BridgeConfig` - - - -
struct BridgeConfig has key
-
- - - -
-Fields - - -
-
-bridge_relayer: address -
-
- -
-
-insurance_fund: address -
-
- -
-
-insurance_budget_divider: u64 -
-
- -
-
-bridge_fee: u64 -
-
- -
-
- - -
- - - -## Constants - - - - - - -
const ENATIVE_BRIDGE_NOT_ENABLED: u64 = 8;
-
- - - - - - - -
const EINVALID_BRIDGE_TRANSFER_ID: u64 = 2;
-
- - - - - - - -
const EZERO_AMOUNT: u64 = 7;
-
- - - - - - - -
const EEVENT_NOT_FOUND: u64 = 3;
-
- - - - - - - -
const EID_NOT_FOUND: u64 = 10;
-
- - - - - - - -
const EINCORRECT_NONCE: u64 = 9;
-
- - - - - - - -
const EINVALID_AMOUNT: u64 = 5;
-
- - - - - - - -
const EINVALID_BRIDGE_RELAYER: u64 = 11;
-
- - - - - - - -
const EINVALID_NONCE: u64 = 4;
-
- - - - - - - -
const EINVALID_VALUE: u64 = 3;
-
- - - - - - - -
const ENONCE_NOT_FOUND: u64 = 6;
-
- - - - - - - -
const ERATE_LIMIT_EXCEEDED: u64 = 4;
-
- - - - - - - -
const ESAME_FEE: u64 = 2;
-
- - - - - - - -
const ETRANSFER_ALREADY_PROCESSED: u64 = 1;
-
- - - - - -## Function `initialize` - -Initializes the module and stores the EventHandles in the resource. - - -
public fun initialize(aptos_framework: &signer)
-
- - - -
-Implementation - - -
public fun initialize(aptos_framework: &signer) {
-    system_addresses::assert_aptos_framework(aptos_framework);
-
-    let bridge_config = BridgeConfig {
-        bridge_relayer: signer::address_of(aptos_framework),
-        insurance_fund: signer::address_of(aptos_framework),
-        insurance_budget_divider: 4,
-        bridge_fee: 40_000_000_000,
-    };
-    move_to(aptos_framework, bridge_config);
-
-    // Ensure the nonce is not already initialized
-    assert!(
-        !exists<Nonce>(signer::address_of(aptos_framework)),
-        2
-    );
-
-    // Create the Nonce resource with an initial value of 0
-    move_to<Nonce>(aptos_framework, Nonce {
-        value: 0
-    });
-
-
-    move_to(aptos_framework, BridgeEvents {
-        bridge_transfer_initiated_events: account::new_event_handle<BridgeTransferInitiatedEvent>(aptos_framework),
-        bridge_transfer_completed_events: account::new_event_handle<BridgeTransferCompletedEvent>(aptos_framework),
-    });
-    system_addresses::assert_aptos_framework(aptos_framework);
-
-    let outbound_rate_limit_budget = OutboundRateLimitBudget {
-        day: smart_table::new(),
-    };
-
-    move_to(aptos_framework, outbound_rate_limit_budget);
-
-
-    let inbound_rate_limit_budget = InboundRateLimitBudget {
-        day: smart_table::new(),
-    };
-
-    move_to(aptos_framework, inbound_rate_limit_budget);
-
-    let nonces_to_details = SmartTableWrapper<u64, OutboundTransfer> {
-        inner: smart_table::new(),
-    };
-
-    move_to(aptos_framework, nonces_to_details);
-
-    let ids_to_inbound_nonces = SmartTableWrapper<vector<u8>, u64> {
-        inner: smart_table::new(),
-    };
-
-    move_to(aptos_framework, ids_to_inbound_nonces);
-}
-
- - - -
- - - -## Function `normalize_u64_to_32_bytes` - -Converts a u64 to a 32-byte vector. - -@param value The u64 value to convert. -@return A 32-byte vector containing the u64 value in little-endian order. - -How BCS works: https://github.com/zefchain/bcs?tab=readme-ov-file#booleans-and-integers - -@example: a u64 value 0x12_34_56_78_ab_cd_ef_00 is converted to a 32-byte vector: -[0x00, 0x00, ..., 0x00, 0x12, 0x34, 0x56, 0x78, 0xab, 0xcd, 0xef, 0x00] - - -
public(friend) fun normalize_u64_to_32_bytes(value: &u64): vector<u8>
-
- - - -
-Implementation - - -
public(friend) fun normalize_u64_to_32_bytes(value: &u64): vector<u8> {
-    let r = bcs::to_bytes(&(*value as u256));
-    // BCS returns the bytes in reverse order, so we reverse the result.
-    vector::reverse(&mut r);
-    r
-}
-
- - - -
- - - -## Function `is_inbound_nonce_set` - -Checks if a bridge transfer ID is associated with an inbound nonce. -@param bridge_transfer_id The bridge transfer ID. -@return true if the ID is associated with an existing inbound nonce, false otherwise. - - -
public(friend) fun is_inbound_nonce_set(bridge_transfer_id: vector<u8>): bool
-
- - - -
-Implementation - - -
public(friend) fun is_inbound_nonce_set(bridge_transfer_id: vector<u8>): bool acquires SmartTableWrapper {
-    let table = borrow_global<SmartTableWrapper<vector<u8>, u64>>(@aptos_framework);
-    smart_table::contains(&table.inner, bridge_transfer_id)
-}
-
- - - -
- - - -## Function `create_details` - -Creates bridge transfer details with validation. - -@param initiator The initiating party of the transfer. -@param recipient The receiving party of the transfer. -@param amount The amount to be transferred. -@param nonce The unique nonce for the transfer. -@return A BridgeTransferDetails object. -@abort If the amount is zero or locks are invalid. - - -
public(friend) fun create_details(initiator: address, recipient: ethereum::EthereumAddress, amount: u64, nonce: u64): native_bridge::OutboundTransfer
-
- - - -
-Implementation - - -
public(friend) fun create_details(initiator: address, recipient: EthereumAddress, amount: u64, nonce: u64)
-    : OutboundTransfer {
-    assert!(amount > 0, EZERO_AMOUNT);
-
-    // Create a bridge transfer ID algorithmically
-    let combined_bytes = vector::empty<u8>();
-    vector::append(&mut combined_bytes, bcs::to_bytes(&initiator));
-    vector::append(&mut combined_bytes, bcs::to_bytes(&recipient));
-    vector::append(&mut combined_bytes, bcs::to_bytes(&amount));
-    vector::append(&mut combined_bytes, bcs::to_bytes(&nonce));
-    let bridge_transfer_id = keccak256(combined_bytes);
-
-    OutboundTransfer {
-        bridge_transfer_id,
-        initiator,
-        recipient,
-        amount,
-    }
-}
-
- - - -
- - - -## Function `add` - -Record details of an initiated transfer for quick lookup, mapping the transfer nonce to the transfer details - -@param nonce The nonce of the initiated transfer. -@param details The bridge transfer details - - -
public(friend) fun add(nonce: u64, details: native_bridge::OutboundTransfer)
-
- - - -
-Implementation - - -
public(friend) fun add(nonce: u64, details: OutboundTransfer) acquires SmartTableWrapper {
-    assert!(features::abort_native_bridge_enabled(), ENATIVE_BRIDGE_NOT_ENABLED);
-
-    let table = borrow_global_mut<SmartTableWrapper<u64, OutboundTransfer>>(@aptos_framework);
-    smart_table::add(&mut table.inner, nonce, details);
-}
-
- - - -
- - - -## Function `set_bridge_transfer_id_to_inbound_nonce` - -Record details of a completed transfer, mapping bridge transfer ID to inbound nonce - -@param bridge_transfer_id Bridge transfer ID. -@param inbound_nonce The inbound nonce of the completed transfer - - -
public(friend) fun set_bridge_transfer_id_to_inbound_nonce(bridge_transfer_id: vector<u8>, inbound_nonce: u64)
-
- - - -
-Implementation - - -
public(friend) fun set_bridge_transfer_id_to_inbound_nonce(bridge_transfer_id: vector<u8>, inbound_nonce: u64) acquires SmartTableWrapper {
-    assert!(features::abort_native_bridge_enabled(), ENATIVE_BRIDGE_NOT_ENABLED);
-
-    assert_valid_bridge_transfer_id(&bridge_transfer_id);
-    let table = borrow_global_mut<SmartTableWrapper<vector<u8>, u64>>(@aptos_framework);
-    smart_table::add(&mut table.inner, bridge_transfer_id, inbound_nonce);
-}
-
- - - -
- - - -## Function `assert_valid_bridge_transfer_id` - -Asserts that the bridge transfer ID is valid. - -@param bridge_transfer_id The bridge transfer ID to validate. -@abort If the ID is invalid. - - -
public(friend) fun assert_valid_bridge_transfer_id(bridge_transfer_id: &vector<u8>)
-
- - - -
-Implementation - - -
public(friend) fun assert_valid_bridge_transfer_id(bridge_transfer_id: &vector<u8>) {
-    assert!(vector::length(bridge_transfer_id) == 32, EINVALID_BRIDGE_TRANSFER_ID);
-}
-
- - - -
- - - -## Function `bridge_transfer_id` - -Generates a unique outbound bridge transfer ID based on transfer details and nonce. - -@param details The bridge transfer details. -@return The generated bridge transfer ID. - - -
public(friend) fun bridge_transfer_id(initiator: address, recipient: ethereum::EthereumAddress, amount: u64, nonce: u64): vector<u8>
-
- - - -
-Implementation - - -
public(friend) fun bridge_transfer_id(initiator: address, recipient: EthereumAddress, amount: u64, nonce: u64) : vector<u8> {
-    // Serialize each param
-    let initiator_bytes = bcs::to_bytes<address>(&initiator);
-    let recipient_bytes = ethereum::get_inner_ethereum_address(recipient);
-    let amount_bytes = normalize_u64_to_32_bytes(&amount);
-    let nonce_bytes = normalize_u64_to_32_bytes(&nonce);
-    // Concatenate, then hash and return the bridge transfer ID
-    let combined_bytes = vector::empty<u8>();
-    vector::append(&mut combined_bytes, initiator_bytes);
-    vector::append(&mut combined_bytes, recipient_bytes);
-    vector::append(&mut combined_bytes, amount_bytes);
-    vector::append(&mut combined_bytes, nonce_bytes);
-    keccak256(combined_bytes)
-}
-
- - - -
- - - -## Function `bridge_relayer` - -Retrieves the address of the current bridge relayer. - -@return The address of the current bridge relayer. - - -
#[view]
-public fun bridge_relayer(): address
-
- - - -
-Implementation - - -
public fun bridge_relayer(): address acquires BridgeConfig {
-    borrow_global_mut<BridgeConfig>(@aptos_framework).bridge_relayer
-}
-
- - - -
- - - -## Function `insurance_fund` - -Retrieves the address of the current insurance fund. - -@return The address of the current insurance fund. - - -
#[view]
-public fun insurance_fund(): address
-
- - - -
-Implementation - - -
public fun insurance_fund(): address acquires BridgeConfig {
-    borrow_global_mut<BridgeConfig>(@aptos_framework).insurance_fund
-}
-
- - - -
- - - -## Function `insurance_budget_divider` - -Retrieves the current insurance budget divider. - -@return The current insurance budget divider. - - -
#[view]
-public fun insurance_budget_divider(): u64
-
- - - -
-Implementation - - -
public fun insurance_budget_divider(): u64 acquires BridgeConfig {
-    borrow_global_mut<BridgeConfig>(@aptos_framework).insurance_budget_divider
-}
-
- - - -
- - - -## Function `bridge_fee` - -Retrieves the current bridge fee. - -@return The current bridge fee. - - -
#[view]
-public fun bridge_fee(): u64
-
- - - -
-Implementation - - -
public fun bridge_fee(): u64 acquires BridgeConfig {
-    borrow_global_mut<BridgeConfig>(@aptos_framework).bridge_fee
-}
-
- - - -
- - - -## Function `get_bridge_transfer_details_from_nonce` - -Gets the bridge transfer details (OutboundTransfer) from the given nonce. -@param nonce The nonce of the bridge transfer. -@return The OutboundTransfer struct containing the transfer details. -@abort If the nonce is not found in the smart table. - - -
#[view]
-public fun get_bridge_transfer_details_from_nonce(nonce: u64): native_bridge::OutboundTransfer
-
- - - -
-Implementation - - -
public fun get_bridge_transfer_details_from_nonce(nonce: u64): OutboundTransfer acquires SmartTableWrapper {
-    let table = borrow_global<SmartTableWrapper<u64, OutboundTransfer>>(@aptos_framework);
-
-    // Check if the nonce exists in the table
-    assert!(smart_table::contains(&table.inner, nonce), ENONCE_NOT_FOUND);
-
-    // If it exists, return the associated `OutboundTransfer` details
-    *smart_table::borrow(&table.inner, nonce)
-}
-
- - - -
- - - -## Function `get_inbound_nonce_from_bridge_transfer_id` - -Gets the inbound nonce from the bridge transfer ID. -@param bridge_transfer_id The bridge transfer ID. -@return The inbound nonce. -@abort If the bridge transfer ID is not found in the smart table. - - -
#[view]
-public fun get_inbound_nonce_from_bridge_transfer_id(bridge_transfer_id: vector<u8>): u64
-
- - - -
-Implementation - - -
public fun get_inbound_nonce_from_bridge_transfer_id(bridge_transfer_id: vector<u8>): u64 acquires SmartTableWrapper {
-    let table = borrow_global<SmartTableWrapper<vector<u8>, u64>>(@aptos_framework);
-
-    // Check if the bridge transfer ID exists in the table
-    assert!(smart_table::contains(&table.inner, bridge_transfer_id), ENONCE_NOT_FOUND);
-
-    // If it exists, return the associated nonce
-    *smart_table::borrow(&table.inner, bridge_transfer_id)
-}
-
- - - -
- - - -## Function `increment_and_get_nonce` - -Increment and get the current nonce - - -
fun increment_and_get_nonce(): u64
-
- - - -
-Implementation - - -
fun increment_and_get_nonce(): u64 acquires Nonce {
-    let nonce_ref = borrow_global_mut<Nonce>(@aptos_framework);
-    nonce_ref.value = nonce_ref.value + 1;
-    nonce_ref.value
-}
-
- - - -
- - - -## Function `store_aptos_coin_burn_cap` - -Stores the burn capability for AptosCoin, converting to a fungible asset reference if the feature is enabled. - -@param aptos_framework The signer representing the Aptos framework. -@param burn_cap The burn capability for AptosCoin. - - -
public fun store_aptos_coin_burn_cap(aptos_framework: &signer, burn_cap: coin::BurnCapability<aptos_coin::AptosCoin>)
-
- - - -
-Implementation - - -
public fun store_aptos_coin_burn_cap(aptos_framework: &signer, burn_cap: BurnCapability<AptosCoin>) {
-    system_addresses::assert_aptos_framework(aptos_framework);
-    if (features::operations_default_to_fa_apt_store_enabled()) {
-        let burn_ref = coin::convert_and_take_paired_burn_ref(burn_cap);
-        move_to(aptos_framework, AptosFABurnCapabilities { burn_ref });
-    } else {
-        move_to(aptos_framework, AptosCoinBurnCapability { burn_cap })
-    }
-}
-
- - - -
- - - -## Function `store_aptos_coin_mint_cap` - -Stores the mint capability for AptosCoin. - -@param aptos_framework The signer representing the Aptos framework. -@param mint_cap The mint capability for AptosCoin. - - -
public fun store_aptos_coin_mint_cap(aptos_framework: &signer, mint_cap: coin::MintCapability<aptos_coin::AptosCoin>)
-
- - - -
-Implementation - - -
public fun store_aptos_coin_mint_cap(aptos_framework: &signer, mint_cap: MintCapability<AptosCoin>) {
-    system_addresses::assert_aptos_framework(aptos_framework);
-    move_to(aptos_framework, AptosCoinMintCapability { mint_cap })
-}
-
- - - -
- - - -## Function `mint_to` - -Mints a specified amount of AptosCoin to a recipient's address. - -@param aptos_framework The signer representing the Aptos framework. -@param recipient The address of the recipient to mint coins to. -@param amount The amount of AptosCoin to mint. - - -
public fun mint_to(aptos_framework: &signer, recipient: address, amount: u64)
-
- - - -
-Implementation - - -
public fun mint_to(aptos_framework: &signer, recipient: address, amount: u64) acquires AptosCoinMintCapability {
-    system_addresses::assert_aptos_framework(aptos_framework);
-    mint_internal(recipient, amount);
-}
-
- - - -
- - - -## Function `mint` - -Mints a specified amount of AptosCoin to a recipient's address. - -@param recipient The address of the recipient to mint coins to. -@param amount The amount of AptosCoin to mint. -@abort If the mint capability is not available. - - -
public(friend) fun mint(recipient: address, amount: u64)
-
- - - -
-Implementation - - -
public(friend) fun mint(recipient: address, amount: u64) acquires AptosCoinMintCapability {
-    assert!(features::abort_native_bridge_enabled(), ENATIVE_BRIDGE_NOT_ENABLED);
-
-    mint_internal(recipient, amount);
-}
-
- - - -
- - - -## Function `mint_internal` - -Mints a specified amount of AptosCoin to a recipient's address. - -@param recipient The address of the recipient to mint coins to. -@param amount The amount of AptosCoin to mint. - - -
fun mint_internal(recipient: address, amount: u64)
-
- - - -
-Implementation - - -
fun mint_internal(recipient: address, amount: u64) acquires AptosCoinMintCapability {
-    coin::deposit(recipient, coin::mint(
-        amount,
-        &borrow_global<AptosCoinMintCapability>(@aptos_framework).mint_cap
-    ));
-}
-
- - - -
- - - -## Function `burn_from` - -Burns a specified amount of AptosCoin from an address. - -@param aptos_framework The signer representing the Aptos framework. -@param from The address from which to burn AptosCoin. -@param amount The amount of AptosCoin to burn. -@abort If the burn capability is not available. - - -
public fun burn_from(aptos_framework: &signer, from: address, amount: u64)
-
- - - -
-Implementation - - -
public fun burn_from(aptos_framework: &signer, from: address, amount: u64) acquires AptosCoinBurnCapability {
-    system_addresses::assert_aptos_framework(aptos_framework);
-    burn_internal(from, amount);
-}
-
- - - -
- - - -## Function `burn` - -Burns a specified amount of AptosCoin from an address. - -@param from The address from which to burn AptosCoin. -@param amount The amount of AptosCoin to burn. -@abort If the burn capability is not available. - - -
public(friend) fun burn(from: address, amount: u64)
-
- - - -
-Implementation - - -
public(friend) fun burn(from: address, amount: u64) acquires AptosCoinBurnCapability {
-    assert!(features::abort_native_bridge_enabled(), ENATIVE_BRIDGE_NOT_ENABLED);
-
-    burn_internal(from, amount);
-}
-
- - - -
- - - -## Function `burn_internal` - -Burns a specified amount of AptosCoin from an address. - -@param from The address from which to burn AptosCoin. -@param amount The amount of AptosCoin to burn. - - -
fun burn_internal(from: address, amount: u64)
-
- - - -
-Implementation - - -
fun burn_internal(from: address, amount: u64) acquires AptosCoinBurnCapability {
-    coin::burn_from(
-        from,
-        amount,
-        &borrow_global<AptosCoinBurnCapability>(@aptos_framework).burn_cap,
-    );
-}
-
- - - -
- - - -## Function `initiate_bridge_transfer` - -Initiate a bridge transfer of MOVE from Movement to Ethereum. -Anyone can initiate a bridge transfer from the source chain. -The amount is burnt from the initiator and the module-level nonce is incremented. -@param initiator The signer initiating the transfer on the source chain. -@param recipient The recipient's Ethereum address as a vector of bytes. -@param amount The amount of MOVE to be bridged. - - -
public entry fun initiate_bridge_transfer(initiator: &signer, recipient: vector<u8>, amount: u64)
-
- - - -
-Implementation - - -
public entry fun initiate_bridge_transfer(
-    initiator: &signer,
-    recipient: vector<u8>,
-    amount: u64
-) acquires BridgeEvents, Nonce, AptosCoinBurnCapability, AptosCoinMintCapability, SmartTableWrapper, OutboundRateLimitBudget, BridgeConfig {
-    let initiator_address = signer::address_of(initiator);
-    let ethereum_address = ethereum::ethereum_address_20_bytes(recipient);
-
-    // Ensure the amount is enough for the bridge fee and charge for it
-    let new_amount = charge_bridge_fee(amount);
-
-    assert_outbound_rate_limit_budget_not_exceeded(new_amount);
-
-    // Increment and retrieve the nonce
-    let nonce = increment_and_get_nonce();
-
-    // Create bridge transfer details
-    let details = create_details(
-        initiator_address,
-        ethereum_address,
-        new_amount,
-        nonce
-    );
-
-    let bridge_transfer_id = bridge_transfer_id(
-        initiator_address,
-        ethereum_address,
-        new_amount,
-        nonce
-    );
-
-    // Add the transfer details to storage
-    add(nonce, details);
-
-    // Burn the amount from the initiator
-    burn_internal(initiator_address, amount);
-
-    let bridge_events = borrow_global_mut<BridgeEvents>(@aptos_framework);
-
-    // Emit an event with nonce
-    event::emit_event(
-         &mut bridge_events.bridge_transfer_initiated_events,
-        BridgeTransferInitiatedEvent {
-            bridge_transfer_id,
-            initiator: initiator_address,
-            recipient,
-            amount: new_amount,
-            nonce,
-        }
-    );
-}
-
- - - -
- - - -## Function `complete_bridge_transfer` - -Completes a bridge transfer on the destination chain. - -@param caller The signer representing the bridge relayer. -@param initiator The initiator's Ethereum address as a vector of bytes. -@param bridge_transfer_id The unique identifier for the bridge transfer. -@param recipient The address of the recipient on the Aptos blockchain. -@param amount The amount of assets to be minted to the recipient. -@param nonce The unique nonce for the transfer. -@abort If the caller is not the bridge relayer or the transfer has already been processed. - - -
public entry fun complete_bridge_transfer(caller: &signer, bridge_transfer_id: vector<u8>, initiator: vector<u8>, recipient: address, amount: u64, nonce: u64)
-
- - - -
-Implementation - - -
public entry fun complete_bridge_transfer(
-    caller: &signer,
-    bridge_transfer_id: vector<u8>,
-    initiator: vector<u8>,
-    recipient: address,
-    amount: u64,
-    nonce: u64
-) acquires BridgeEvents, AptosCoinMintCapability, SmartTableWrapper, InboundRateLimitBudget, BridgeConfig {
-    // Ensure the caller is the bridge relayer
-    assert_is_caller_relayer(caller);
-    assert_inbound_rate_limit_budget_not_exceeded(amount);
-
-    // Check if the bridge transfer ID is already associated with an inbound nonce
-    let inbound_nonce_exists = is_inbound_nonce_set(bridge_transfer_id);
-    assert!(!inbound_nonce_exists, ETRANSFER_ALREADY_PROCESSED);
-    assert!(nonce > 0, EINVALID_NONCE);
-
-    // Validate the bridge_transfer_id by reconstructing the hash
-    let recipient_bytes = bcs::to_bytes(&recipient);
-    let amount_bytes = normalize_u64_to_32_bytes(&amount);
-    let nonce_bytes = normalize_u64_to_32_bytes(&nonce);
-
-    let combined_bytes = vector::empty<u8>();
-    vector::append(&mut combined_bytes, initiator);
-    vector::append(&mut combined_bytes, recipient_bytes);
-    vector::append(&mut combined_bytes, amount_bytes);
-    vector::append(&mut combined_bytes, nonce_bytes);
-
-    assert!(keccak256(combined_bytes) == bridge_transfer_id, EINVALID_BRIDGE_TRANSFER_ID);
-
-    // Record the transfer as completed by associating the bridge_transfer_id with the inbound nonce
-    set_bridge_transfer_id_to_inbound_nonce(bridge_transfer_id, nonce);
-
-    // Mint to the recipient
-    mint_internal(recipient, amount);
-
-    // Emit the event
-    let bridge_events = borrow_global_mut<BridgeEvents>(@aptos_framework);
-    event::emit_event(
-        &mut bridge_events.bridge_transfer_completed_events,
-        BridgeTransferCompletedEvent {
-            bridge_transfer_id,
-            initiator,
-            recipient,
-            amount,
-            nonce,
-        },
-    );
-}
-
- - - -
- - - -## Function `charge_bridge_fee` - -Charges the bridge fee on an initiated bridge transfer. - -@param amount The transfer amount from which the fee is deducted. -@return The new amount after deducting the bridge fee. - - -
fun charge_bridge_fee(amount: u64): u64
-
- - - -
-Implementation - - -
fun charge_bridge_fee(amount: u64) : u64 acquires AptosCoinMintCapability, BridgeConfig {
-    let bridge_fee = bridge_fee();
-    let bridge_relayer = bridge_relayer();
-    assert!(amount > bridge_fee, EINVALID_AMOUNT);
-    let new_amount = amount - bridge_fee;
-    mint_internal(bridge_relayer, bridge_fee);
-    new_amount
-}
-
- - - -
- - - -## Function `update_bridge_relayer` - -Updates the bridge relayer, requiring governance validation. - -@param aptos_framework The signer representing the Aptos framework. -@param new_relayer The new address to be set as the bridge relayer. -@abort If the current relayer is the same as the new relayer. - - -
public fun update_bridge_relayer(aptos_framework: &signer, new_relayer: address)
-
- - - -
-Implementation - - -
public fun update_bridge_relayer(aptos_framework: &signer, new_relayer: address
-)   acquires BridgeConfig {
-    system_addresses::assert_aptos_framework(aptos_framework);
-    let bridge_config = borrow_global_mut<BridgeConfig>(@aptos_framework);
-    let old_relayer = bridge_config.bridge_relayer;
-    assert!(old_relayer != new_relayer, EINVALID_BRIDGE_RELAYER);
-
-    bridge_config.bridge_relayer = new_relayer;
-
-    event::emit(
-        BridgeConfigRelayerUpdated {
-            old_relayer,
-            new_relayer,
-        },
-    );
-}
-
- - - -
- - - -## Function `update_bridge_fee` - -Updates the bridge fee, requiring relayer validation. - -@param relayer The signer representing the Relayer. -@param new_bridge_fee The new bridge fee to be set. -@abort If the new bridge fee is the same as the old bridge fee. - - -
public entry fun update_bridge_fee(relayer: &signer, new_bridge_fee: u64)
-
- - - -
-Implementation - - -
public entry fun update_bridge_fee(relayer: &signer, new_bridge_fee: u64
-) acquires BridgeConfig {
-    assert_is_caller_relayer(relayer);
-    let bridge_config = borrow_global_mut<BridgeConfig>(@aptos_framework);
-    let old_bridge_fee = bridge_config.bridge_fee;
-    assert!(old_bridge_fee != new_bridge_fee, ESAME_FEE);
-    bridge_config.bridge_fee = new_bridge_fee;
-
-    event::emit(
-        BridgeFeeChangedEvent {
-            old_bridge_fee,
-            new_bridge_fee,
-        },
-    );
-}
-
- - - -
- - - -## Function `update_insurance_fund` - -Updates the insurance fund, requiring governance validation. - -@param aptos_framework The signer representing the Aptos framework. -@param new_insurance_fund The new insurance fund to be set. -@abort If the new insurance fund is the same as the old insurance fund. - - -
public entry fun update_insurance_fund(aptos_framework: &signer, new_insurance_fund: address)
-
- - - -
-Implementation - - -
public entry fun update_insurance_fund(aptos_framework: &signer, new_insurance_fund: address
-) acquires BridgeConfig {
-    system_addresses::assert_aptos_framework(aptos_framework);
-    let bridge_config = borrow_global_mut<BridgeConfig>(@aptos_framework);
-    let old_insurance_fund = bridge_config.insurance_fund;
-    assert!(old_insurance_fund != new_insurance_fund, EINVALID_VALUE);
-    bridge_config.insurance_fund = new_insurance_fund;
-
-    event::emit(
-        BridgeInsuranceFundChangedEvent {
-            old_insurance_fund,
-            new_insurance_fund,
-        },
-    );
-}
-
- - - -
- - - -## Function `update_insurance_budget_divider` - -Updates the insurance budget divider, requiring governance validation. - -@param aptos_framework The signer representing the Aptos framework. -@param new_insurance_budget_divider The new insurance budget divider to be set. -@abort If the new insurance budget divider is the same as the old insurance budget divider. - - -
public entry fun update_insurance_budget_divider(aptos_framework: &signer, new_insurance_budget_divider: u64)
-
- - - -
-Implementation - - -
public entry fun update_insurance_budget_divider(aptos_framework: &signer, new_insurance_budget_divider: u64
-) acquires BridgeConfig {
-    system_addresses::assert_aptos_framework(aptos_framework);
-    // Ensure the new insurance budget divider is greater than 1 and different from the old one
-    // Assumes symmetric Insurance Funds on both L1 and L2
-    assert!(new_insurance_budget_divider > 1, EINVALID_VALUE);
-
-    let bridge_config = borrow_global_mut<BridgeConfig>(@aptos_framework);
-    let old_insurance_budget_divider = bridge_config.insurance_budget_divider;
-    assert!(old_insurance_budget_divider != new_insurance_budget_divider, EINVALID_VALUE);
-
-    bridge_config.insurance_budget_divider = new_insurance_budget_divider;
-
-    event::emit(
-        BridgeInsuranceBudgetDividerChangedEvent {
-            old_insurance_budget_divider,
-            new_insurance_budget_divider,
-        },
-    );
-}
-
- - - -
- - - -## Function `assert_is_caller_relayer` - -Asserts that the caller is the current bridge relayer. - -@param caller The signer whose authority is being checked. -@abort If the caller is not the current bridge relayer. - - -
public(friend) fun assert_is_caller_relayer(caller: &signer)
-
- - - -
-Implementation - - -
public(friend) fun assert_is_caller_relayer(caller: &signer
-) acquires BridgeConfig {
-    assert!(borrow_global<BridgeConfig>(@aptos_framework).bridge_relayer == signer::address_of(caller), EINVALID_BRIDGE_RELAYER);
-}
-
- - - -
- - - -## Function `assert_outbound_rate_limit_budget_not_exceeded` - -Asserts that the rate limit budget is not exceeded. - -@param amount The amount to be transferred. - - -
fun assert_outbound_rate_limit_budget_not_exceeded(amount: u64)
-
- - - -
-Implementation - - -
fun assert_outbound_rate_limit_budget_not_exceeded(amount: u64) acquires OutboundRateLimitBudget, BridgeConfig {
-    let insurance_fund = borrow_global<BridgeConfig>(@aptos_framework).insurance_fund;
-    let insurance_budget_divider = borrow_global<BridgeConfig>(@aptos_framework).insurance_budget_divider;
-    let table = borrow_global_mut<OutboundRateLimitBudget>(@aptos_framework);
-
-    let day = timestamp::now_seconds() / 86400;
-    let current_budget = smart_table::borrow_mut_with_default(&mut table.day, day, 0);
-    smart_table::upsert(&mut table.day, day, *current_budget + amount);
-    let rate_limit = coin::balance<AptosCoin>(insurance_fund) / insurance_budget_divider;
-    assert!(*smart_table::borrow(&table.day, day) < rate_limit, ERATE_LIMIT_EXCEEDED);
-}
-
- - - -
- - - -## Function `assert_inbound_rate_limit_budget_not_exceeded` - -Asserts that the rate limit budget is not exceeded. - -@param amount The amount to be transferred. - - -
fun assert_inbound_rate_limit_budget_not_exceeded(amount: u64)
-
- - - -
-Implementation - - -
fun assert_inbound_rate_limit_budget_not_exceeded(amount: u64) acquires InboundRateLimitBudget, BridgeConfig {
-    let insurance_fund = borrow_global<BridgeConfig>(@aptos_framework).insurance_fund;
-    let insurance_budget_divider = borrow_global<BridgeConfig>(@aptos_framework).insurance_budget_divider;
-    let table = borrow_global_mut<InboundRateLimitBudget>(@aptos_framework);
-
-    let day = timestamp::now_seconds() / 86400;
-    let current_budget = smart_table::borrow_mut_with_default(&mut table.day, day, 0);
-    smart_table::upsert(&mut table.day, day, *current_budget + amount);
-    let rate_limit = coin::balance<AptosCoin>(insurance_fund) / insurance_budget_divider;
-    assert!(*smart_table::borrow(&table.day, day) < rate_limit, ERATE_LIMIT_EXCEEDED);
-}
-
- - - -
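The budget arithmetic used by the two assertions above can be summarized with a small, hypothetical pure-Move sketch (module and function names are illustrative, not part of the framework): the daily limit is the insurance fund balance divided by `insurance_budget_divider`, and a transfer fails once the day's running total is no longer strictly below that limit.

```move
module 0xcafe::rate_limit_example {
    /// Mirrors the check above: the transfer is rejected when the day's running total,
    /// including this transfer, is no longer strictly below balance / divider.
    fun exceeds_daily_budget(spent_today: u64, amount: u64, insurance_balance: u64, divider: u64): bool {
        let rate_limit = insurance_balance / divider;
        spent_today + amount >= rate_limit
    }

    #[test]
    fun example() {
        // A 400_000 coin insurance fund with divider 4 gives a 100_000 daily budget.
        assert!(!exceeds_daily_budget(0, 50_000, 400_000, 4), 0);
        assert!(exceeds_daily_budget(60_000, 50_000, 400_000, 4), 1);
    }
}
```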
- - - -## Function `test_normalize_u64_to_32_bytes_helper` - -Test serialization of u64 to 32 bytes - - -
fun test_normalize_u64_to_32_bytes_helper(x: u64, expected: vector<u8>)
-
- - - -
-Implementation - - -
fun test_normalize_u64_to_32_bytes_helper(x: u64, expected: vector<u8>) {
-    let r = normalize_u64_to_32_bytes(&x);
-    assert!(vector::length(&r) == 32, 0);
-    assert!(r == expected, 0);
-}
-
- - - -
- - -[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/aptos-framework/doc/nonce_validation.md b/aptos-move/framework/aptos-framework/doc/nonce_validation.md new file mode 100644 index 0000000000000..5ecac00c7c2c6 --- /dev/null +++ b/aptos-move/framework/aptos-framework/doc/nonce_validation.md @@ -0,0 +1,446 @@ + + + +# Module `0x1::nonce_validation` + + + +- [Resource `NonceHistory`](#0x1_nonce_validation_NonceHistory) +- [Struct `Bucket`](#0x1_nonce_validation_Bucket) +- [Struct `NonceKeyWithExpTime`](#0x1_nonce_validation_NonceKeyWithExpTime) +- [Struct `NonceKey`](#0x1_nonce_validation_NonceKey) +- [Constants](#@Constants_0) +- [Function `initialize`](#0x1_nonce_validation_initialize) +- [Function `initialize_nonce_table`](#0x1_nonce_validation_initialize_nonce_table) +- [Function `empty_bucket`](#0x1_nonce_validation_empty_bucket) +- [Function `add_nonce_buckets`](#0x1_nonce_validation_add_nonce_buckets) +- [Function `check_and_insert_nonce`](#0x1_nonce_validation_check_and_insert_nonce) + + +
use 0x1::aptos_hash;
+use 0x1::big_ordered_map;
+use 0x1::error;
+use 0x1::option;
+use 0x1::system_addresses;
+use 0x1::table;
+use 0x1::timestamp;
+
+ + + + + +## Resource `NonceHistory` + + + +
struct NonceHistory has key
+
+ + + +
+Fields + + +
+
+nonce_table: table::Table<u64, nonce_validation::Bucket> +
+
+ +
+
+next_key: u64 +
+
+ +
+
+ + +
+ + + +## Struct `Bucket` + + + +
struct Bucket has store
+
+ + + +
+Fields + + +
+
+nonces_ordered_by_exp_time: big_ordered_map::BigOrderedMap<nonce_validation::NonceKeyWithExpTime, bool> +
+
+ +
+
+nonce_to_exp_time_map: big_ordered_map::BigOrderedMap<nonce_validation::NonceKey, u64> +
+
+ +
+
+ + +
+ + + +## Struct `NonceKeyWithExpTime` + + + +
struct NonceKeyWithExpTime has copy, drop, store
+
+ + + +
+Fields + + +
+
+txn_expiration_time: u64 +
+
+ +
+
+sender_address: address +
+
+ +
+
+nonce: u64 +
+
+ +
+
+ + +
+ + + +## Struct `NonceKey` + + + +
struct NonceKey has copy, drop, store
+
+ + + +
+Fields + + +
+
+sender_address: address +
+
+ +
+
+nonce: u64 +
+
+ +
+
+ + +
+ + + +## Constants + + + + + + +
const ETRANSACTION_EXPIRATION_TOO_FAR_IN_FUTURE: u64 = 1002;
+
+ + + + + + + +
const E_NONCE_HISTORY_DOES_NOT_EXIST: u64 = 1001;
+
+ + + + + + + +
const MAX_ENTRIES_GARBAGE_COLLECTED_PER_CALL: u64 = 5;
+
+ + + + + + + +
const NONCE_REPLAY_PROTECTION_OVERLAP_INTERVAL_SECS: u64 = 65;
+
+ + + + + + + +
const NUM_BUCKETS: u64 = 50000;
+
+ + + + + +## Function `initialize` + + + +
public(friend) fun initialize(aptos_framework: &signer)
+
+ + + +
+Implementation + + +
public(friend) fun initialize(aptos_framework: &signer) {
+    initialize_nonce_table(aptos_framework);
+}
+
+ + + +
+ + + +## Function `initialize_nonce_table` + + + +
public entry fun initialize_nonce_table(aptos_framework: &signer)
+
+ + + +
+Implementation + + +
public entry fun initialize_nonce_table(aptos_framework: &signer) {
+    system_addresses::assert_aptos_framework(aptos_framework);
+    if (!exists<NonceHistory>(@aptos_framework)) {
+        let table = table::new();
+        let nonce_history = NonceHistory {
+            nonce_table: table,
+            next_key: 0,
+        };
+        move_to<NonceHistory>(aptos_framework, nonce_history);
+    };
+}
+
+ + + +
+ + + +## Function `empty_bucket` + + + +
fun empty_bucket(pre_allocate_slots: bool): nonce_validation::Bucket
+
+ + + +
+Implementation + + +
fun empty_bucket(pre_allocate_slots: bool): Bucket {
+    let bucket = Bucket {
+        nonces_ordered_by_exp_time: big_ordered_map::new_with_reusable(),
+        nonce_to_exp_time_map: big_ordered_map::new_with_reusable(),
+    };
+
+    if (pre_allocate_slots) {
+        // Initialize the big ordered maps with 5 pre-allocated storage slots.
+        // (expiration time, address, nonce) is together 48 bytes.
+        // A 4 KB storage slot can store 80+ such tuples.
+        // The 5 slots should be more than enough for the current use case.
+        bucket.nonces_ordered_by_exp_time.allocate_spare_slots(5);
+        bucket.nonce_to_exp_time_map.allocate_spare_slots(5);
+    };
+    bucket
+}
+
+ + + +
+ + + +## Function `add_nonce_buckets` + + + +
public entry fun add_nonce_buckets(count: u64)
+
+ + + +
+Implementation + + +
public entry fun add_nonce_buckets(count: u64) acquires NonceHistory {
+    assert!(exists<NonceHistory>(@aptos_framework), error::invalid_state(E_NONCE_HISTORY_DOES_NOT_EXIST));
+    let nonce_history = &mut NonceHistory[@aptos_framework];
+    for (i in 0..count) {
+        if (nonce_history.next_key <= NUM_BUCKETS) {
+            if (!nonce_history.nonce_table.contains(nonce_history.next_key)) {
+                nonce_history.nonce_table.add(
+                    nonce_history.next_key,
+                    empty_bucket(true)
+                );
+            };
+            nonce_history.next_key = nonce_history.next_key + 1;
+        }
+    }
+}
+
+ + + +
+ + + +## Function `check_and_insert_nonce` + + + +
public(friend) fun check_and_insert_nonce(sender_address: address, nonce: u64, txn_expiration_time: u64): bool
+
+ + + +
+Implementation + + +
public(friend) fun check_and_insert_nonce(
+    sender_address: address,
+    nonce: u64,
+    txn_expiration_time: u64,
+): bool acquires NonceHistory {
+    assert!(exists<NonceHistory>(@aptos_framework), error::invalid_state(E_NONCE_HISTORY_DOES_NOT_EXIST));
+    // Check if the transaction expiration time is too far in the future.
+    assert!(txn_expiration_time <= timestamp::now_seconds() + NONCE_REPLAY_PROTECTION_OVERLAP_INTERVAL_SECS, error::invalid_argument(ETRANSACTION_EXPIRATION_TOO_FAR_IN_FUTURE));
+    let nonce_history = &mut NonceHistory[@aptos_framework];
+    let nonce_key = NonceKey {
+        sender_address,
+        nonce,
+    };
+    let bucket_index = sip_hash_from_value(&nonce_key) % NUM_BUCKETS;
+    let current_time = timestamp::now_seconds();
+    if (!nonce_history.nonce_table.contains(bucket_index)) {
+        nonce_history.nonce_table.add(
+            bucket_index,
+            empty_bucket(false)
+        );
+    };
+    let bucket = table::borrow_mut(&mut nonce_history.nonce_table, bucket_index);
+
+    let existing_exp_time = bucket.nonce_to_exp_time_map.get(&nonce_key);
+    if (existing_exp_time.is_some()) {
+        let existing_exp_time = existing_exp_time.extract();
+
+        // If the existing (address, nonce) pair has not expired, return false.
+        if (existing_exp_time >= current_time) {
+            return false;
+        };
+
+        // We maintain an invariant that two transactions with the same (address, nonce) pair cannot be stored
+        // in the nonce history if their transaction expiration times are less than `NONCE_REPLAY_PROTECTION_OVERLAP_INTERVAL_SECS`
+        // seconds apart.
+        if (txn_expiration_time <= existing_exp_time + NONCE_REPLAY_PROTECTION_OVERLAP_INTERVAL_SECS) {
+            return false;
+        };
+
+        // If the existing (address, nonce) pair has expired, garbage collect it.
+        bucket.nonce_to_exp_time_map.remove(&nonce_key);
+        bucket.nonces_ordered_by_exp_time.remove(&NonceKeyWithExpTime {
+            txn_expiration_time: existing_exp_time,
+            sender_address,
+            nonce,
+        });
+    };
+
+    // Garbage collect up to MAX_ENTRIES_GARBAGE_COLLECTED_PER_CALL expired nonces in the bucket.
+    let i = 0;
+    while (i < MAX_ENTRIES_GARBAGE_COLLECTED_PER_CALL && !bucket.nonces_ordered_by_exp_time.is_empty()) {
+        let (front_k, _) = bucket.nonces_ordered_by_exp_time.borrow_front();
+        // We garbage collect a nonce only after it has expired and an additional
+        // NONCE_REPLAY_PROTECTION_OVERLAP_INTERVAL_SECS seconds have passed.
+        if (front_k.txn_expiration_time + NONCE_REPLAY_PROTECTION_OVERLAP_INTERVAL_SECS < current_time) {
+            bucket.nonces_ordered_by_exp_time.pop_front();
+            bucket.nonce_to_exp_time_map.remove(&NonceKey {
+                sender_address: front_k.sender_address,
+                nonce: front_k.nonce,
+            });
+        } else {
+            break;
+        };
+        i = i + 1;
+    };
+
+    // Insert the (address, nonce) pair in the bucket.
+    let nonce_key_with_exp_time = NonceKeyWithExpTime {
+        txn_expiration_time,
+        sender_address,
+        nonce,
+    };
+    bucket.nonces_ordered_by_exp_time.add(nonce_key_with_exp_time, true);
+    bucket.nonce_to_exp_time_map.add(nonce_key, txn_expiration_time);
+    true
+}
+
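The two early-return checks for a previously seen (address, nonce) pair can be restated as a small, hypothetical sketch (the module, function, and test values are illustrative): a recorded pair is accepted again only once the old entry has expired and the new expiration lies more than the overlap interval beyond it.

```move
module 0xcafe::nonce_rule_example {
    /// Overlap interval assumed to match NONCE_REPLAY_PROTECTION_OVERLAP_INTERVAL_SECS above.
    const OVERLAP_SECS: u64 = 65;

    /// Restates the acceptance rule from `check_and_insert_nonce` for an already-recorded pair.
    fun accept_replayed_nonce(existing_exp_time: u64, new_exp_time: u64, now: u64): bool {
        if (existing_exp_time >= now) return false;                        // old entry still live
        if (new_exp_time <= existing_exp_time + OVERLAP_SECS) return false; // expirations too close together
        true
    }

    #[test]
    fun example() {
        // With now = 1_000, an entry that expired at 900 can be reused only with expiration > 965.
        assert!(!accept_replayed_nonce(900, 960, 1_000), 0);
        assert!(accept_replayed_nonce(900, 1_100, 1_000), 1);
    }
}
```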
+ + + +
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/aptos-framework/doc/object.md b/aptos-move/framework/aptos-framework/doc/object.md index ef2998c62b846..59105248363e2 100644 --- a/aptos-move/framework/aptos-framework/doc/object.md +++ b/aptos-move/framework/aptos-framework/doc/object.md @@ -32,6 +32,7 @@ make it so that a reference to a global object can be returned from a function. - [Struct `TransferRef`](#0x1_object_TransferRef) - [Struct `LinearTransferRef`](#0x1_object_LinearTransferRef) - [Struct `DeriveRef`](#0x1_object_DeriveRef) +- [Struct `TransferPermission`](#0x1_object_TransferPermission) - [Struct `TransferEvent`](#0x1_object_TransferEvent) - [Struct `Transfer`](#0x1_object_Transfer) - [Constants](#@Constants_0) @@ -89,6 +90,8 @@ make it so that a reference to a global object can be returned from a function. - [Function `is_owner`](#0x1_object_is_owner) - [Function `owns`](#0x1_object_owns) - [Function `root_owner`](#0x1_object_root_owner) +- [Function `grant_permission`](#0x1_object_grant_permission) +- [Function `grant_permission_with_transfer_ref`](#0x1_object_grant_permission_with_transfer_ref) - [Specification](#@Specification_1) - [High-level Requirements](#high-level-req) - [Module-level Specification](#module-level-spec) @@ -133,6 +136,7 @@ make it so that a reference to a global object can be returned from a function. - [Function `is_owner`](#@Specification_1_is_owner) - [Function `owns`](#@Specification_1_owns) - [Function `root_owner`](#@Specification_1_root_owner) + - [Function `grant_permission`](#@Specification_1_grant_permission)
use 0x1::account;
@@ -144,6 +148,7 @@ make it so that a reference to a global object can be returned from a function.
 use 0x1::from_bcs;
 use 0x1::guid;
 use 0x1::hash;
+use 0x1::permissioned_signer;
 use 0x1::signer;
 use 0x1::transaction_context;
 use 0x1::vector;
@@ -496,6 +501,34 @@ Used to create derived objects from a given objects.
 
 
 
+
+
+
+
+## Struct `TransferPermission`
+
+Permission to transfer object with permissioned signer.
+
+
+
struct TransferPermission has copy, drop, store
+
+ + + +
+Fields + + +
+
+object: address +
+
+ +
+
+ +
@@ -624,7 +657,7 @@ Exceeds maximum nesting for an object transfer. - + The caller does not have ownership permissions @@ -644,6 +677,16 @@ The object does not have ungated transfers enabled + + +Cannot burn an object that is already burnt. + + +
const EOBJECT_ALREADY_BURNT: u64 = 10;
+
+ + + An object does not exist at this address @@ -1677,7 +1720,7 @@ Removes from the specified Object from global storage. } = object_core; if (exists<Untransferable>(ref.self)) { - let Untransferable {} = move_from<Untransferable>(ref.self); + let Untransferable {} = move_from<Untransferable>(ref.self); }; event::destroy_handle(transfer_events); @@ -1887,15 +1930,16 @@ Transfer to the destination address using a LinearTransferRef. to, }, ); + } else { + event::emit_event( + &mut object.transfer_events, + TransferEvent { + object: ref.self, + from: object.owner, + to, + }, + ); }; - event::emit_event( - &mut object.transfer_events, - TransferEvent { - object: ref.self, - from: object.owner, - to, - }, - ); object.owner = to; }
@@ -1988,6 +2032,10 @@ hierarchy. to: address, ) acquires ObjectCore { let owner_address = signer::address_of(owner); + assert!( + permissioned_signer::check_permission_exists(owner, TransferPermission { object }), + error::permission_denied(EOBJECT_NOT_TRANSFERRABLE) + ); verify_ungated_and_descendant(owner_address, object); transfer_raw_inner(object, to); } @@ -2023,15 +2071,16 @@ hierarchy. to, }, ); + } else { + event::emit_event( + &mut object_core.transfer_events, + TransferEvent { + object, + from: object_core.owner, + to, + }, + ); }; - event::emit_event( - &mut object_core.transfer_events, - TransferEvent { - object, - from: object_core.owner, - to, - }, - ); object_core.owner = to; }; } @@ -2130,9 +2179,10 @@ objects may have cyclic dependencies. ## Function `burn` -Forcefully transfer an unwanted object to BURN_ADDRESS, ignoring whether ungated_transfer is allowed. +Add a TombStone to the object. The object will then be interpreted as hidden via indexers. This only works for objects directly owned and for simplicity does not apply to indirectly owned objects. Original owners can reclaim burnt objects any time in the future by calling unburn. +Please use the test only [object::burn_object_with_transfer] for testing with previously burned objects.
public entry fun burn<T: key>(owner: &signer, object: object::Object<T>)
@@ -2148,8 +2198,8 @@ Original owners can reclaim burnt objects any time in the future by calling unbu
     let original_owner = signer::address_of(owner);
     assert!(is_owner(object, original_owner), error::permission_denied(ENOT_OBJECT_OWNER));
     let object_addr = object.inner;
+    assert!(!exists<TombStone>(object_addr), EOBJECT_ALREADY_BURNT);
     move_to(&create_signer(object_addr), TombStone { original_owner });
-    transfer_raw_inner(object_addr, BURN_ADDRESS);
 }
 
@@ -2179,10 +2229,26 @@ Allow origin owners to reclaim any objects they previous burnt. ) acquires TombStone, ObjectCore { let object_addr = object.inner; assert!(exists<TombStone>(object_addr), error::invalid_argument(EOBJECT_NOT_BURNT)); + assert!( + permissioned_signer::check_permission_exists(original_owner, TransferPermission { object: object_addr }), + error::permission_denied(EOBJECT_NOT_TRANSFERRABLE) + ); - let TombStone { original_owner: original_owner_addr } = move_from<TombStone>(object_addr); - assert!(original_owner_addr == signer::address_of(original_owner), error::permission_denied(ENOT_OBJECT_OWNER)); - transfer_raw_inner(object_addr, original_owner_addr); + // The new owner of the object can always unburn it, but if it's the burn address, we go to the old functionality + let object_core = borrow_global<ObjectCore>(object_addr); + if (object_core.owner == signer::address_of(original_owner)) { + let TombStone { original_owner: _ } = move_from<TombStone>(object_addr); + } else if (object_core.owner == BURN_ADDRESS) { + // The old functionality + let TombStone { original_owner: original_owner_addr } = move_from<TombStone>(object_addr); + assert!( + original_owner_addr == signer::address_of(original_owner), + error::permission_denied(ENOT_OBJECT_OWNER) + ); + transfer_raw_inner(object_addr, original_owner_addr); + } else { + abort error::permission_denied(ENOT_OBJECT_OWNER); + }; }
@@ -2227,7 +2293,8 @@ Return true if ungated transfer is allowed. Return the current owner. -
public fun owner<T: key>(object: object::Object<T>): address
+
#[view]
+public fun owner<T: key>(object: object::Object<T>): address
 
@@ -2256,7 +2323,8 @@ Return the current owner. Return true if the provided address is the current owner. -
public fun is_owner<T: key>(object: object::Object<T>, owner: address): bool
+
#[view]
+public fun is_owner<T: key>(object: object::Object<T>, owner: address): bool
 
@@ -2281,7 +2349,8 @@ Return true if the provided address is the current owner. Return true if the provided address has indirect or direct ownership of the provided object. -
public fun owns<T: key>(object: object::Object<T>, owner: address): bool
+
#[view]
+public fun owns<T: key>(object: object::Object<T>, owner: address): bool
 
@@ -2292,15 +2361,16 @@ Return true if the provided address has indirect or direct ownership of the prov
public fun owns<T: key>(object: Object<T>, owner: address): bool acquires ObjectCore {
     let current_address = object_address(&object);
-    if (current_address == owner) {
-        return true
-    };
 
     assert!(
         exists<ObjectCore>(current_address),
         error::not_found(EOBJECT_DOES_NOT_EXIST),
     );
 
+    if (current_address == owner) {
+        return true
+    };
+
     let object = borrow_global<ObjectCore>(current_address);
     let current_address = object.owner;
 
@@ -2331,7 +2401,8 @@ Returns the root owner of an object. As objects support nested ownership, it can
 to determine the identity of the starting point of ownership.
 
 
-
public fun root_owner<T: key>(object: object::Object<T>): address
+
#[view]
+public fun root_owner<T: key>(object: object::Object<T>): address
 
@@ -2351,6 +2422,70 @@ to determine the identity of the starting point of ownership. + + + + +## Function `grant_permission` + +Master signer offers a transfer permission of an object to a permissioned signer. + + +
public fun grant_permission<T>(master: &signer, permissioned_signer: &signer, object: object::Object<T>)
+
+ + + +
+Implementation + + +
public fun grant_permission<T>(
+    master: &signer,
+    permissioned_signer: &signer,
+    object: Object<T>,
+) {
+    permissioned_signer::authorize_unlimited(
+        master,
+        permissioned_signer,
+        TransferPermission { object: object.inner }
+    )
+}
+
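A short usage sketch, assuming a permissioned signer has already been obtained elsewhere (the `0xcafe` module and helper name are hypothetical): the master grants the permission, after which the permissioned signer can move the object via `object::transfer`.

```move
module 0xcafe::transfer_permission_example {
    use aptos_framework::object::{Self, Object};

    /// Grant transfer rights over `obj` to an already-created permissioned signer,
    /// then let that signer move the object.
    public fun delegate_and_transfer<T: key>(
        master: &signer,
        permissioned: &signer,
        obj: Object<T>,
        to: address,
    ) {
        object::grant_permission(master, permissioned, obj);
        object::transfer(permissioned, obj, to);
    }
}
```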
+ + + +
+ + + +## Function `grant_permission_with_transfer_ref` + +Grant a transfer permission to the permissioned signer using TransferRef. + + +
public fun grant_permission_with_transfer_ref(permissioned_signer: &signer, ref: &object::TransferRef)
+
+ + + +
+Implementation + + +
public fun grant_permission_with_transfer_ref(
+    permissioned_signer: &signer,
+    ref: &TransferRef,
+) {
+    permissioned_signer::grant_unlimited_with_permissioned_signer(
+        permissioned_signer,
+        TransferPermission { object: ref.self }
+    )
+}
+
+ + +
@@ -2427,16 +2562,8 @@ to determine the identity of the starting point of ownership. ### Module-level Specification -
pragma aborts_if_is_strict;
-
- - - - - - - -
fun spec_exists_at<T: key>(object: address): bool;
+
pragma verify = false;
+pragma aborts_if_is_partial;
 
@@ -3255,7 +3382,8 @@ to determine the identity of the starting point of ownership. let object_address = object.inner; aborts_if !exists<ObjectCore>(object_address); aborts_if owner(object) != signer::address_of(owner); -aborts_if is_burnt(object); +ensures exists<TombStone>(object_address); +ensures is_owner(object, signer::address_of(owner));
@@ -3276,7 +3404,9 @@ to determine the identity of the starting point of ownership. aborts_if !exists<ObjectCore>(object_address); aborts_if !is_burnt(object); let tomb_stone = borrow_global<TombStone>(object_address); -aborts_if tomb_stone.original_owner != signer::address_of(original_owner); +let original_owner_address = signer::address_of(original_owner); +let object_current_owner = borrow_global<ObjectCore>(object_address).owner; +aborts_if object_current_owner != original_owner_address && tomb_stone.original_owner != original_owner_address;
@@ -3303,7 +3433,8 @@ to determine the identity of the starting point of ownership. ### Function `owner` -
public fun owner<T: key>(object: object::Object<T>): address
+
#[view]
+public fun owner<T: key>(object: object::Object<T>): address
 
@@ -3320,7 +3451,8 @@ to determine the identity of the starting point of ownership. ### Function `is_owner` -
public fun is_owner<T: key>(object: object::Object<T>, owner: address): bool
+
#[view]
+public fun is_owner<T: key>(object: object::Object<T>, owner: address): bool
 
@@ -3337,7 +3469,8 @@ to determine the identity of the starting point of ownership. ### Function `owns` -
public fun owns<T: key>(object: object::Object<T>, owner: address): bool
+
#[view]
+public fun owns<T: key>(object: object::Object<T>, owner: address): bool
 
@@ -3358,7 +3491,8 @@ to determine the identity of the starting point of ownership. ### Function `root_owner` -
public fun root_owner<T: key>(object: object::Object<T>): address
+
#[view]
+public fun root_owner<T: key>(object: object::Object<T>): address
 
@@ -3395,4 +3529,32 @@ to determine the identity of the starting point of ownership.
+ + + +### Function `grant_permission` + + +
public fun grant_permission<T>(master: &signer, permissioned_signer: &signer, object: object::Object<T>)
+
+ + + + +
pragma aborts_if_is_partial;
+aborts_if !permissioned_signer::spec_is_permissioned_signer(permissioned_signer);
+aborts_if permissioned_signer::spec_is_permissioned_signer(master);
+aborts_if signer::address_of(master) != signer::address_of(permissioned_signer);
+
+ + + + + + + +
fun spec_exists_at<T: key>(object: address): bool;
+
+ + [move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/aptos-framework/doc/object_code_deployment.md b/aptos-move/framework/aptos-framework/doc/object_code_deployment.md index e4caf56a77c74..db2ac6c296522 100644 --- a/aptos-move/framework/aptos-framework/doc/object_code_deployment.md +++ b/aptos-move/framework/aptos-framework/doc/object_code_deployment.md @@ -190,6 +190,16 @@ Event emitted when code in an existing object is made immutable. + + +Current permissioned signer cannot deploy object code. + + +
const ENO_CODE_PERMISSION: u64 = 4;
+
+ + + Not the owner of the code_object @@ -243,6 +253,7 @@ the code to be published via code. T metadata_serialized: vector<u8>, code: vector<vector<u8>>, ) { + code::check_code_publishing_permission(publisher); assert!( features::is_object_code_deployment_enabled(), error::unavailable(EOBJECT_CODE_DEPLOYMENT_NOT_SUPPORTED), @@ -319,6 +330,7 @@ Requires the publisher to be the owner of the code_object. code: vector<vector<u8>>, code_object: Object<PackageRegistry>, ) acquires ManagingRefs { + code::check_code_publishing_permission(publisher); let publisher_address = signer::address_of(publisher); assert!( object::is_owner(code_object, publisher_address), diff --git a/aptos-move/framework/aptos-framework/doc/optional_aggregator.md b/aptos-move/framework/aptos-framework/doc/optional_aggregator.md index 309ee697c2448..10ece6b6de997 100644 --- a/aptos-move/framework/aptos-framework/doc/optional_aggregator.md +++ b/aptos-move/framework/aptos-framework/doc/optional_aggregator.md @@ -18,9 +18,6 @@ aggregator (parallelizable) or via normal integers. - [Function `destroy_integer`](#0x1_optional_aggregator_destroy_integer) - [Function `new`](#0x1_optional_aggregator_new) - [Function `switch`](#0x1_optional_aggregator_switch) -- [Function `switch_and_zero_out`](#0x1_optional_aggregator_switch_and_zero_out) -- [Function `switch_to_integer_and_zero_out`](#0x1_optional_aggregator_switch_to_integer_and_zero_out) -- [Function `switch_to_aggregator_and_zero_out`](#0x1_optional_aggregator_switch_to_aggregator_and_zero_out) - [Function `destroy`](#0x1_optional_aggregator_destroy) - [Function `destroy_optional_aggregator`](#0x1_optional_aggregator_destroy_optional_aggregator) - [Function `destroy_optional_integer`](#0x1_optional_aggregator_destroy_optional_integer) @@ -40,9 +37,6 @@ aggregator (parallelizable) or via normal integers. - [Function `destroy_integer`](#@Specification_1_destroy_integer) - [Function `new`](#@Specification_1_new) - [Function `switch`](#@Specification_1_switch) - - [Function `switch_and_zero_out`](#@Specification_1_switch_and_zero_out) - - [Function `switch_to_integer_and_zero_out`](#@Specification_1_switch_to_integer_and_zero_out) - - [Function `switch_to_aggregator_and_zero_out`](#@Specification_1_switch_to_aggregator_and_zero_out) - [Function `destroy`](#@Specification_1_destroy) - [Function `destroy_optional_aggregator`](#@Specification_1_destroy_optional_aggregator) - [Function `destroy_optional_integer`](#@Specification_1_destroy_optional_integer) @@ -132,6 +126,15 @@ Contains either an aggregator or a normal integer, both overflowing on limit. ## Constants + + + + +
const MAX_U128: u128 = 340282366920938463463374607431768211455;
+
+ + + The value of aggregator underflows (goes below zero). Raised by native code. @@ -152,6 +155,16 @@ Aggregator feature is not supported. Raised by native code. + + +OptionalAggregator (Agg V1) switch not supported any more. + + +
const ESWITCH_DEPRECATED: u64 = 3;
+
+ + + ## Function `new_integer` @@ -317,7 +330,7 @@ Destroys an integer. Creates a new optional aggregator. -
public(friend) fun new(limit: u128, parallelizable: bool): optional_aggregator::OptionalAggregator
+
public(friend) fun new(parallelizable: bool): optional_aggregator::OptionalAggregator
 
@@ -326,16 +339,16 @@ Creates a new optional aggregator. Implementation -
public(friend) fun new(limit: u128, parallelizable: bool): OptionalAggregator {
+
public(friend) fun new(parallelizable: bool): OptionalAggregator {
     if (parallelizable) {
         OptionalAggregator {
-            aggregator: option::some(aggregator_factory::create_aggregator_internal(limit)),
+            aggregator: option::some(aggregator_factory::create_aggregator_internal()),
             integer: option::none(),
         }
     } else {
         OptionalAggregator {
             aggregator: option::none(),
-            integer: option::some(new_integer(limit)),
+            integer: option::some(new_integer(MAX_U128)),
         }
     }
 }
@@ -352,7 +365,7 @@ Creates a new optional aggregator.
 Switches between parallelizable and non-parallelizable implementations.
 
 
-
public fun switch(optional_aggregator: &mut optional_aggregator::OptionalAggregator)
+
public fun switch(_optional_aggregator: &mut optional_aggregator::OptionalAggregator)
 
@@ -361,106 +374,8 @@ Switches between parallelizable and non-parallelizable implementations. Implementation -
public fun switch(optional_aggregator: &mut OptionalAggregator) {
-    let value = read(optional_aggregator);
-    switch_and_zero_out(optional_aggregator);
-    add(optional_aggregator, value);
-}
-
- - - - - - - -## Function `switch_and_zero_out` - -Switches between parallelizable and non-parallelizable implementations, setting -the value of the new optional aggregator to zero. - - -
fun switch_and_zero_out(optional_aggregator: &mut optional_aggregator::OptionalAggregator)
-
- - - -
-Implementation - - -
fun switch_and_zero_out(optional_aggregator: &mut OptionalAggregator) {
-    if (is_parallelizable(optional_aggregator)) {
-        switch_to_integer_and_zero_out(optional_aggregator);
-    } else {
-        switch_to_aggregator_and_zero_out(optional_aggregator);
-    }
-}
-
- - - -
- - - -## Function `switch_to_integer_and_zero_out` - -Switches from parallelizable to non-parallelizable implementation, zero-initializing -the value. - - -
fun switch_to_integer_and_zero_out(optional_aggregator: &mut optional_aggregator::OptionalAggregator): u128
-
- - - -
-Implementation - - -
fun switch_to_integer_and_zero_out(
-    optional_aggregator: &mut OptionalAggregator
-): u128 {
-    let aggregator = option::extract(&mut optional_aggregator.aggregator);
-    let limit = aggregator::limit(&aggregator);
-    aggregator::destroy(aggregator);
-    let integer = new_integer(limit);
-    option::fill(&mut optional_aggregator.integer, integer);
-    limit
-}
-
- - - -
- - - -## Function `switch_to_aggregator_and_zero_out` - -Switches from non-parallelizable to parallelizable implementation, zero-initializing -the value. - - -
fun switch_to_aggregator_and_zero_out(optional_aggregator: &mut optional_aggregator::OptionalAggregator): u128
-
- - - -
-Implementation - - -
fun switch_to_aggregator_and_zero_out(
-    optional_aggregator: &mut OptionalAggregator
-): u128 {
-    let integer = option::extract(&mut optional_aggregator.integer);
-    let limit = limit(&integer);
-    destroy_integer(integer);
-    let aggregator = aggregator_factory::create_aggregator_internal(limit);
-    option::fill(&mut optional_aggregator.aggregator, aggregator);
-    limit
+
public fun switch(_optional_aggregator: &mut OptionalAggregator) {
+    abort error::invalid_state(ESWITCH_DEPRECATED)
 }
 
@@ -796,7 +711,7 @@ Check for overflow.
aborts_if value > (integer.limit - integer.value);
-aborts_if integer.value + value > MAX_U128;
+aborts_if integer.value + value > MAX_U128;
 ensures integer.value <= integer.limit;
 ensures integer.value == old(integer.value) + value;
 
@@ -876,7 +791,7 @@ Check for overflow. ### Function `new` -
public(friend) fun new(limit: u128, parallelizable: bool): optional_aggregator::OptionalAggregator
+
public(friend) fun new(parallelizable: bool): optional_aggregator::OptionalAggregator
 
@@ -896,89 +811,13 @@ Check for overflow. ### Function `switch` -
public fun switch(optional_aggregator: &mut optional_aggregator::OptionalAggregator)
-
- - - - -
let vec_ref = optional_aggregator.integer.vec;
-aborts_if is_parallelizable(optional_aggregator) && len(vec_ref) != 0;
-aborts_if !is_parallelizable(optional_aggregator) && len(vec_ref) == 0;
-aborts_if !is_parallelizable(optional_aggregator) && !exists<aggregator_factory::AggregatorFactory>(@aptos_framework);
-ensures optional_aggregator_value(optional_aggregator) == optional_aggregator_value(old(optional_aggregator));
-
- - - - - -### Function `switch_and_zero_out` - - -
fun switch_and_zero_out(optional_aggregator: &mut optional_aggregator::OptionalAggregator)
-
- - -Option does not exist When Option exists. -Option exists when Option does not exist. -The AggregatorFactory is under the @aptos_framework when Option does not exist. - - -
let vec_ref = optional_aggregator.integer.vec;
-aborts_if is_parallelizable(optional_aggregator) && len(vec_ref) != 0;
-aborts_if !is_parallelizable(optional_aggregator) && len(vec_ref) == 0;
-aborts_if !is_parallelizable(optional_aggregator) && !exists<aggregator_factory::AggregatorFactory>(@aptos_framework);
-// This enforces high-level requirement 3:
-ensures is_parallelizable(old(optional_aggregator)) ==> !is_parallelizable(optional_aggregator);
-ensures !is_parallelizable(old(optional_aggregator)) ==> is_parallelizable(optional_aggregator);
-ensures optional_aggregator_value(optional_aggregator) == 0;
-
- - - - - -### Function `switch_to_integer_and_zero_out` - - -
fun switch_to_integer_and_zero_out(optional_aggregator: &mut optional_aggregator::OptionalAggregator): u128
+
public fun switch(_optional_aggregator: &mut optional_aggregator::OptionalAggregator)
 
-The aggregator exists and the integer dosex not exist when Switches from parallelizable to non-parallelizable implementation. -
let limit = aggregator::spec_get_limit(option::borrow(optional_aggregator.aggregator));
-aborts_if len(optional_aggregator.aggregator.vec) == 0;
-aborts_if len(optional_aggregator.integer.vec) != 0;
-ensures !is_parallelizable(optional_aggregator);
-ensures option::borrow(optional_aggregator.integer).limit == limit;
-ensures option::borrow(optional_aggregator.integer).value == 0;
-
- - - - - -### Function `switch_to_aggregator_and_zero_out` - - -
fun switch_to_aggregator_and_zero_out(optional_aggregator: &mut optional_aggregator::OptionalAggregator): u128
-
- - -The integer exists and the aggregator does not exist when Switches from non-parallelizable to parallelizable implementation. -The AggregatorFactory is under the @aptos_framework. - - -
let limit = option::borrow(optional_aggregator.integer).limit;
-aborts_if len(optional_aggregator.integer.vec) == 0;
-aborts_if !exists<aggregator_factory::AggregatorFactory>(@aptos_framework);
-aborts_if len(optional_aggregator.aggregator.vec) != 0;
-ensures is_parallelizable(optional_aggregator);
-ensures aggregator::spec_get_limit(option::borrow(optional_aggregator.aggregator)) == limit;
-ensures aggregator::spec_aggregator_get_val(option::borrow(optional_aggregator.aggregator)) == 0;
+
aborts_if true;
 
@@ -1095,9 +934,9 @@ The integer exists and the aggregator does not exist when destroy the integer. aborts_if is_parallelizable(optional_aggregator) && (aggregator::spec_aggregator_get_val(option::borrow(optional_aggregator.aggregator)) + value > aggregator::spec_get_limit(option::borrow(optional_aggregator.aggregator))); aborts_if is_parallelizable(optional_aggregator) && (aggregator::spec_aggregator_get_val(option::borrow(optional_aggregator.aggregator)) - + value > MAX_U128); + + value > MAX_U128); aborts_if !is_parallelizable(optional_aggregator) && - (option::borrow(optional_aggregator.integer).value + value > MAX_U128); + (option::borrow(optional_aggregator.integer).value + value > MAX_U128); aborts_if !is_parallelizable(optional_aggregator) && (value > (option::borrow(optional_aggregator.integer).limit - option::borrow(optional_aggregator.integer).value)); } diff --git a/aptos-move/framework/aptos-framework/doc/ordered_map.md b/aptos-move/framework/aptos-framework/doc/ordered_map.md new file mode 100644 index 0000000000000..78995bdbb8acc --- /dev/null +++ b/aptos-move/framework/aptos-framework/doc/ordered_map.md @@ -0,0 +1,1848 @@ + + + +# Module `0x1::ordered_map` + +This module provides an implementation for an ordered map. + +Keys point to values, and each key in the map must be unique. + +Currently, one implementation is provided, backed by a single sorted vector. + +That means that keys can be found within O(log N) time. +Adds and removals take O(N) time, but the constant factor is small, +as it does only O(log N) comparisons, and does efficient mem-copy with vector operations. + +Additionally, it provides a way to lookup and iterate over sorted keys, making range query +take O(log N + R) time (where R is number of elements in the range). + +Most methods operate with OrderedMap being self. +All methods that start with iter_*, operate with IteratorPtr being self. + +Uses cmp::compare for ordering, which compares primitive types natively, and uses common +lexicographical sorting for complex types. + +TODO: all iterator functions are public(friend) for now, so that they can be modified in a +backward incompatible way. Type is also named IteratorPtr, so that Iterator is free to use later. +They are waiting for Move improvement that will allow references to be part of the struct, +allowing cleaner iterator APIs. 
+ + +- [Struct `Entry`](#0x1_ordered_map_Entry) +- [Enum `OrderedMap`](#0x1_ordered_map_OrderedMap) +- [Enum `IteratorPtr`](#0x1_ordered_map_IteratorPtr) +- [Constants](#@Constants_0) +- [Function `new`](#0x1_ordered_map_new) +- [Function `new_from`](#0x1_ordered_map_new_from) +- [Function `length`](#0x1_ordered_map_length) +- [Function `is_empty`](#0x1_ordered_map_is_empty) +- [Function `add`](#0x1_ordered_map_add) +- [Function `upsert`](#0x1_ordered_map_upsert) +- [Function `remove`](#0x1_ordered_map_remove) +- [Function `contains`](#0x1_ordered_map_contains) +- [Function `borrow`](#0x1_ordered_map_borrow) +- [Function `borrow_mut`](#0x1_ordered_map_borrow_mut) +- [Function `replace_key_inplace`](#0x1_ordered_map_replace_key_inplace) +- [Function `add_all`](#0x1_ordered_map_add_all) +- [Function `upsert_all`](#0x1_ordered_map_upsert_all) +- [Function `append`](#0x1_ordered_map_append) +- [Function `append_disjoint`](#0x1_ordered_map_append_disjoint) +- [Function `append_impl`](#0x1_ordered_map_append_impl) +- [Function `trim`](#0x1_ordered_map_trim) +- [Function `borrow_front`](#0x1_ordered_map_borrow_front) +- [Function `borrow_back`](#0x1_ordered_map_borrow_back) +- [Function `pop_front`](#0x1_ordered_map_pop_front) +- [Function `pop_back`](#0x1_ordered_map_pop_back) +- [Function `prev_key`](#0x1_ordered_map_prev_key) +- [Function `next_key`](#0x1_ordered_map_next_key) +- [Function `lower_bound`](#0x1_ordered_map_lower_bound) +- [Function `find`](#0x1_ordered_map_find) +- [Function `new_begin_iter`](#0x1_ordered_map_new_begin_iter) +- [Function `new_end_iter`](#0x1_ordered_map_new_end_iter) +- [Function `iter_next`](#0x1_ordered_map_iter_next) +- [Function `iter_prev`](#0x1_ordered_map_iter_prev) +- [Function `iter_is_begin`](#0x1_ordered_map_iter_is_begin) +- [Function `iter_is_begin_from_non_empty`](#0x1_ordered_map_iter_is_begin_from_non_empty) +- [Function `iter_is_end`](#0x1_ordered_map_iter_is_end) +- [Function `iter_borrow_key`](#0x1_ordered_map_iter_borrow_key) +- [Function `iter_borrow`](#0x1_ordered_map_iter_borrow) +- [Function `iter_borrow_mut`](#0x1_ordered_map_iter_borrow_mut) +- [Function `iter_remove`](#0x1_ordered_map_iter_remove) +- [Function `iter_replace`](#0x1_ordered_map_iter_replace) +- [Function `iter_add`](#0x1_ordered_map_iter_add) +- [Function `destroy_empty`](#0x1_ordered_map_destroy_empty) +- [Function `keys`](#0x1_ordered_map_keys) +- [Function `values`](#0x1_ordered_map_values) +- [Function `to_vec_pair`](#0x1_ordered_map_to_vec_pair) +- [Function `destroy`](#0x1_ordered_map_destroy) +- [Function `for_each`](#0x1_ordered_map_for_each) +- [Function `for_each_ref`](#0x1_ordered_map_for_each_ref) +- [Function `for_each_ref_friend`](#0x1_ordered_map_for_each_ref_friend) +- [Function `for_each_mut`](#0x1_ordered_map_for_each_mut) +- [Function `new_iter`](#0x1_ordered_map_new_iter) +- [Function `binary_search`](#0x1_ordered_map_binary_search) +- [Specification](#@Specification_1) + + +
use 0x1::cmp;
+use 0x1::error;
+use 0x1::option;
+use 0x1::vector;
+
+ + + + + +## Struct `Entry` + +Individual entry holding (key, value) pair + + +
struct Entry<K, V> has copy, drop, store
+
+ + + +
+Fields + + +
+
+key: K +
+
+ +
+
+value: V +
+
+ +
+
+ + +
+ + + +## Enum `OrderedMap` + +The OrderedMap datastructure. + + +
enum OrderedMap<K, V> has copy, drop, store
+
+ + + +
+Variants + + +
+SortedVectorMap + + +
+Fields + + +
+
+entries: vector<ordered_map::Entry<K, V>> +
+
+ List of entries, sorted by key. +
+
+ + +
+ +
+ +
+ + + +## Enum `IteratorPtr` + +An iterator pointing to a valid position in an ordered map, or to the end. + +TODO: Once fields can be (mutable) references, this class will be deprecated. + + +
enum IteratorPtr has copy, drop
+
+ + + +
+Variants + + +
+End + + +
+Fields + + +
+
+ + +
+ +
+ +
+Position + + +
+Fields + + +
+
+index: u64 +
+
+ The index of the iterator pointing to. +
+
+ + +
+ +
+ +
+ + + +## Constants + + + + + + +
const EITER_OUT_OF_BOUNDS: u64 = 3;
+
+ + + + + +Map key already exists + + +
const EKEY_ALREADY_EXISTS: u64 = 1;
+
+ + + + + +Map key is not found + + +
const EKEY_NOT_FOUND: u64 = 2;
+
+ + + + + +New key used in replace_key_inplace doesn't respect the order + + +
const ENEW_KEY_NOT_IN_ORDER: u64 = 4;
+
+ + + + + +## Function `new` + +Create a new empty OrderedMap, using default (SortedVectorMap) implementation. + + +
public fun new<K, V>(): ordered_map::OrderedMap<K, V>
+
+ + + +
+Implementation + + +
public fun new<K, V>(): OrderedMap<K, V> {
+    OrderedMap::SortedVectorMap {
+        entries: vector::empty(),
+    }
+}
+
+ + + +
+ + + +## Function `new_from` + +Create an OrderedMap from a vector of keys and values. +Aborts with EKEY_ALREADY_EXISTS if duplicate keys are passed in. + + +
public fun new_from<K, V>(keys: vector<K>, values: vector<V>): ordered_map::OrderedMap<K, V>
+
+ + + +
+Implementation + + +
public fun new_from<K, V>(keys: vector<K>, values: vector<V>): OrderedMap<K, V> {
+    let map = new();
+    map.add_all(keys, values);
+    map
+}
+
+ + + +
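+For illustration, a minimal usage sketch (not normative; it only assumes the
+receiver-style API documented above):
+
+```move
+// Hypothetical example: keys must be unique or new_from aborts with EKEY_ALREADY_EXISTS.
+let map = ordered_map::new_from(vector[1u64, 2, 3], vector[b"a", b"b", b"c"]);
+assert!(map.length() == 3, 0);
+```
+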
+ + + +## Function `length` + +Number of elements in the map. + + +
public fun length<K, V>(self: &ordered_map::OrderedMap<K, V>): u64
+
+ + + +
+Implementation + + +
public fun length<K, V>(self: &OrderedMap<K, V>): u64 {
+    self.entries.length()
+}
+
+ + + +
+ + + +## Function `is_empty` + +Whether map is empty. + + +
public fun is_empty<K, V>(self: &ordered_map::OrderedMap<K, V>): bool
+
+ + + +
+Implementation + + +
public fun is_empty<K, V>(self: &OrderedMap<K, V>): bool {
+    self.entries.is_empty()
+}
+
+ + + +
+ + + +## Function `add` + +Add a key/value pair to the map. +Aborts with EKEY_ALREADY_EXISTS if the key already exists. + + +
public fun add<K, V>(self: &mut ordered_map::OrderedMap<K, V>, key: K, value: V)
+
+ + + +
+Implementation + + +
public fun add<K, V>(self: &mut OrderedMap<K, V>, key: K, value: V) {
+    let len = self.entries.length();
+    let index = binary_search(&key, &self.entries, 0, len);
+
+    // key must not already be inside.
+    assert!(index >= len || &self.entries[index].key != &key, error::invalid_argument(EKEY_ALREADY_EXISTS));
+    self.entries.insert(index, Entry { key, value });
+}
+
+ + + +
+ + + +## Function `upsert` + +If the key doesn't exist in the map, inserts the key/value, and returns none. +Otherwise, updates the value under the given key, and returns the old value. + + +
public fun upsert<K: drop, V>(self: &mut ordered_map::OrderedMap<K, V>, key: K, value: V): option::Option<V>
+
+ + + +
+Implementation + + +
public fun upsert<K: drop, V>(self: &mut OrderedMap<K, V>, key: K, value: V): Option<V> {
+    let len = self.entries.length();
+    let index = binary_search(&key, &self.entries, 0, len);
+
+    if (index < len && &self.entries[index].key == &key) {
+        let Entry {
+            key: _,
+            value: old_value,
+        } = self.entries.replace(index, Entry { key, value });
+        option::some(old_value)
+    } else {
+        self.entries.insert(index, Entry { key, value });
+        option::none()
+    }
+}
+
+ + + +
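+A small sketch contrasting add and upsert (illustrative only, using the API above):
+
+```move
+// add aborts on an existing key; upsert overwrites and returns the previous value.
+let map = ordered_map::new<u64, u64>();
+map.add(1, 10);
+let prev = map.upsert(1, 11);
+assert!(prev == option::some(10), 0);
+assert!(*map.borrow(&1) == 11, 1);
+```
+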
+ + + +## Function `remove` + +Remove a key/value pair from the map. +Aborts with EKEY_NOT_FOUND if key doesn't exist. + + +
public fun remove<K: drop, V>(self: &mut ordered_map::OrderedMap<K, V>, key: &K): V
+
+ + + +
+Implementation + + +
public fun remove<K: drop, V>(self: &mut OrderedMap<K, V>, key: &K): V {
+    let len = self.entries.length();
+    let index = binary_search(key, &self.entries, 0, len);
+    assert!(index < len, error::invalid_argument(EKEY_NOT_FOUND));
+    let Entry { key: old_key, value } = self.entries.remove(index);
+    assert!(key == &old_key, error::invalid_argument(EKEY_NOT_FOUND));
+    value
+}
+
+ + + +
+ + + +## Function `contains` + +Returns whether map contains a given key. + + +
public fun contains<K, V>(self: &ordered_map::OrderedMap<K, V>, key: &K): bool
+
+ + + +
+Implementation + + +
public fun contains<K, V>(self: &OrderedMap<K, V>, key: &K): bool {
+    !self.find(key).iter_is_end(self)
+}
+
+ + + +
+ + + +## Function `borrow` + + + +
public fun borrow<K, V>(self: &ordered_map::OrderedMap<K, V>, key: &K): &V
+
+ + + +
+Implementation + + +
public fun borrow<K, V>(self: &OrderedMap<K, V>, key: &K): &V {
+    self.find(key).iter_borrow(self)
+}
+
+ + + +
+ + + +## Function `borrow_mut` + + + +
public fun borrow_mut<K, V>(self: &mut ordered_map::OrderedMap<K, V>, key: &K): &mut V
+
+ + + +
+Implementation + + +
public fun borrow_mut<K, V>(self: &mut OrderedMap<K, V>, key: &K): &mut V {
+    self.find(key).iter_borrow_mut(self)
+}
+
+ + + +
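+A sketch of a guarded in-place update (illustrative only; borrow and borrow_mut abort
+if the key is absent, so contains is checked first):
+
+```move
+let map = ordered_map::new_from(vector[1u64], vector[100u64]);
+if (map.contains(&1)) {
+    let v = map.borrow_mut(&1);
+    *v = *v + 1;
+};
+assert!(*map.borrow(&1) == 101, 0);
+```
+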
+ + + +## Function `replace_key_inplace` + +Changes the key, while keeping the same value attached to it +Aborts with EKEY_NOT_FOUND if old_key doesn't exist. +Aborts with ENEW_KEY_NOT_IN_ORDER if new_key doesn't keep the order old_key was in. + + +
public(friend) fun replace_key_inplace<K: drop, V>(self: &mut ordered_map::OrderedMap<K, V>, old_key: &K, new_key: K)
+
+ + + +
+Implementation + + +
public(friend) fun replace_key_inplace<K: drop, V>(self: &mut OrderedMap<K, V>, old_key: &K, new_key: K) {
+    let len = self.entries.length();
+    let index = binary_search(old_key, &self.entries, 0, len);
+    assert!(index < len, error::invalid_argument(EKEY_NOT_FOUND));
+
+    assert!(old_key == &self.entries[index].key, error::invalid_argument(EKEY_NOT_FOUND));
+
+    // check that after we update the key, order is going to be respected
+    if (index > 0) {
+        assert!(cmp::compare(&self.entries[index - 1].key, &new_key).is_lt(), error::invalid_argument(ENEW_KEY_NOT_IN_ORDER))
+    };
+
+    if (index + 1 < len) {
+        assert!(cmp::compare(&new_key, &self.entries[index + 1].key).is_lt(), error::invalid_argument(ENEW_KEY_NOT_IN_ORDER))
+    };
+
+    self.entries[index].key = new_key;
+}
+
+ + + +
+ + + +## Function `add_all` + +Add multiple key/value pairs to the map. The keys must not already exist. +Aborts with EKEY_ALREADY_EXISTS if a key already exists, or if duplicate keys are passed in. + + +
public fun add_all<K, V>(self: &mut ordered_map::OrderedMap<K, V>, keys: vector<K>, values: vector<V>)
+
+ + + +
+Implementation + + +
public fun add_all<K, V>(self: &mut OrderedMap<K, V>, keys: vector<K>, values: vector<V>) {
+    // TODO: Can be optimized, by sorting keys and values, and then creating map.
+    keys.zip(values, |key, value| {
+        self.add(key, value);
+    });
+}
+
+ + + +
+ + + +## Function `upsert_all` + +Add multiple key/value pairs to the map, overwrites values if they exist already, +or if duplicate keys are passed in. + + +
public fun upsert_all<K: drop, V: drop>(self: &mut ordered_map::OrderedMap<K, V>, keys: vector<K>, values: vector<V>)
+
+ + + +
+Implementation + + +
public fun upsert_all<K: drop, V: drop>(self: &mut OrderedMap<K, V>, keys: vector<K>, values: vector<V>) {
+    // TODO: Can be optimized, by sorting keys and values, and then creating map.
+    keys.zip(values, |key, value| {
+        self.upsert(key, value);
+    });
+}
+
+ + + +
+ + + +## Function `append` + +Takes all elements from other and adds them to self, +overwriting if any key is already present in self. + + +
public fun append<K: drop, V: drop>(self: &mut ordered_map::OrderedMap<K, V>, other: ordered_map::OrderedMap<K, V>)
+
+ + + +
+Implementation + + +
public fun append<K: drop, V: drop>(self: &mut OrderedMap<K, V>, other: OrderedMap<K, V>) {
+    self.append_impl(other);
+}
+
+ + + +
+ + + +## Function `append_disjoint` + +Takes all elements from other and adds them to self. +Aborts with EKEY_ALREADY_EXISTS if other has a key already present in self. + + +
public fun append_disjoint<K, V>(self: &mut ordered_map::OrderedMap<K, V>, other: ordered_map::OrderedMap<K, V>)
+
+ + + +
+Implementation + + +
public fun append_disjoint<K, V>(self: &mut OrderedMap<K, V>, other: OrderedMap<K, V>) {
+    let overwritten = self.append_impl(other);
+    assert!(overwritten.length() == 0, error::invalid_argument(EKEY_ALREADY_EXISTS));
+    overwritten.destroy_empty();
+}
+
+ + + +
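+A sketch of merging two maps (illustrative only):
+
+```move
+// append keeps the value from `other` on key collisions; append_disjoint aborts instead.
+let a = ordered_map::new_from(vector[1u64, 2], vector[10u64, 20]);
+let b = ordered_map::new_from(vector[2u64, 3], vector[21u64, 30]);
+a.append(b);
+assert!(*a.borrow(&2) == 21 && a.length() == 3, 0);
+```
+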
+ + + +## Function `append_impl` + +Takes all elements from other and adds them to self, returning list of entries in self that were overwritten. + + +
fun append_impl<K, V>(self: &mut ordered_map::OrderedMap<K, V>, other: ordered_map::OrderedMap<K, V>): vector<ordered_map::Entry<K, V>>
+
+ + + +
+Implementation + + +
fun append_impl<K, V>(self: &mut OrderedMap<K, V>, other: OrderedMap<K, V>): vector<Entry<K,V>> {
+    let OrderedMap::SortedVectorMap {
+        entries: other_entries,
+    } = other;
+    let overwritten = vector::empty();
+
+    if (other_entries.is_empty()) {
+        other_entries.destroy_empty();
+        return overwritten;
+    };
+
+    if (self.entries.is_empty()) {
+        self.entries.append(other_entries);
+        return overwritten;
+    };
+
+    // Optimization: if all elements in `other` are larger than all elements in `self`, we can just move them over.
+    if (cmp::compare(&self.entries.borrow(self.entries.length() - 1).key, &other_entries.borrow(0).key).is_lt()) {
+        self.entries.append(other_entries);
+        return overwritten;
+    };
+
+    // In O(n), traversing from the back, build reverse sorted result, and then reverse it back
+    let reverse_result = vector::empty();
+    let cur_i = self.entries.length() - 1;
+    let other_i = other_entries.length() - 1;
+
+    // after the end of the loop, other_entries is empty, and any leftover is in entries
+    loop {
+        let ord = cmp::compare(&self.entries[cur_i].key, &other_entries[other_i].key);
+        if (ord.is_gt()) {
+            reverse_result.push_back(self.entries.pop_back());
+            if (cur_i == 0) {
+                // make other_entries empty, and rest in entries.
+                // TODO cannot use mem::swap until it is public/released
+                // mem::swap(&mut self.entries, &mut other_entries);
+                self.entries.append(other_entries);
+                break;
+            } else {
+                cur_i -= 1;
+            };
+        } else {
+            // is_lt or is_eq
+            if (ord.is_eq()) {
+                // skip (record as overwritten) the entry from self; below, the entry from other goes into the result.
+                overwritten.push_back(self.entries.pop_back());
+            };
+
+            reverse_result.push_back(other_entries.pop_back());
+            if (other_i == 0) {
+                other_entries.destroy_empty();
+                break;
+            } else {
+                other_i -= 1;
+            };
+        };
+    };
+
+    self.entries.reverse_append(reverse_result);
+
+    overwritten
+}
+
+ + + +
+ + + +## Function `trim` + +Splits the collection into two, such that self is left with the first at elements. +Returns a newly allocated map containing the elements in the range [at, len). +After the call, the original map will be left containing the elements [0, at). + + +
public fun trim<K, V>(self: &mut ordered_map::OrderedMap<K, V>, at: u64): ordered_map::OrderedMap<K, V>
+
+ + + +
+Implementation + + +
public fun trim<K, V>(self: &mut OrderedMap<K, V>, at: u64): OrderedMap<K, V> {
+    let rest = self.entries.trim(at);
+
+    OrderedMap::SortedVectorMap {
+        entries: rest
+    }
+}
+
+ + + +
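+A sketch of splitting a map by position (illustrative only):
+
+```move
+// trim splits by element index, not by key value.
+let map = ordered_map::new_from(vector[1u64, 2, 3, 4], vector[10u64, 20, 30, 40]);
+let rest = map.trim(2);
+assert!(map.length() == 2 && rest.length() == 2, 0);
+assert!(*rest.borrow(&3) == 30, 1);
+```
+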
+ + + +## Function `borrow_front` + + + +
public fun borrow_front<K, V>(self: &ordered_map::OrderedMap<K, V>): (&K, &V)
+
+ + + +
+Implementation + + +
public fun borrow_front<K, V>(self: &OrderedMap<K, V>): (&K, &V) {
+    let entry = self.entries.borrow(0);
+    (&entry.key, &entry.value)
+}
+
+ + + +
+ + + +## Function `borrow_back` + + + +
public fun borrow_back<K, V>(self: &ordered_map::OrderedMap<K, V>): (&K, &V)
+
+ + + +
+Implementation + + +
public fun borrow_back<K, V>(self: &OrderedMap<K, V>): (&K, &V) {
+    let entry = self.entries.borrow(self.entries.length() - 1);
+    (&entry.key, &entry.value)
+}
+
+ + + +
+ + + +## Function `pop_front` + + + +
public fun pop_front<K, V>(self: &mut ordered_map::OrderedMap<K, V>): (K, V)
+
+ + + +
+Implementation + + +
public fun pop_front<K, V>(self: &mut OrderedMap<K, V>): (K, V) {
+    let Entry { key, value } = self.entries.remove(0);
+    (key, value)
+}
+
+ + + +
+ + + +## Function `pop_back` + + + +
public fun pop_back<K, V>(self: &mut ordered_map::OrderedMap<K, V>): (K, V)
+
+ + + +
+Implementation + + +
public fun pop_back<K, V>(self: &mut OrderedMap<K, V>): (K, V) {
+    let Entry { key, value } = self.entries.pop_back();
+    (key, value)
+}
+
+ + + +
+ + + +## Function `prev_key` + + + +
public fun prev_key<K: copy, V>(self: &ordered_map::OrderedMap<K, V>, key: &K): option::Option<K>
+
+ + + +
+Implementation + + +
public fun prev_key<K: copy, V>(self: &OrderedMap<K, V>, key: &K): Option<K> {
+    let it = self.lower_bound(key);
+    if (it.iter_is_begin(self)) {
+        option::none()
+    } else {
+        option::some(*it.iter_prev(self).iter_borrow_key(self))
+    }
+}
+
+ + + +
+ + + +## Function `next_key` + + + +
public fun next_key<K: copy, V>(self: &ordered_map::OrderedMap<K, V>, key: &K): option::Option<K>
+
+ + + +
+Implementation + + +
public fun next_key<K: copy, V>(self: &OrderedMap<K, V>, key: &K): Option<K> {
+    let it = self.lower_bound(key);
+    if (it.iter_is_end(self)) {
+        option::none()
+    } else {
+        let cur_key = it.iter_borrow_key(self);
+        if (key == cur_key) {
+            let it = it.iter_next(self);
+            if (it.iter_is_end(self)) {
+                option::none()
+            } else {
+                option::some(*it.iter_borrow_key(self))
+            }
+        } else {
+            option::some(*cur_key)
+        }
+    }
+}
+
+ + + +
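+A sketch of neighbour lookups over the sorted keys (illustrative only):
+
+```move
+let map = ordered_map::new_from(vector[10u64, 20, 30], vector[1u64, 2, 3]);
+assert!(map.next_key(&10) == option::some(20), 0);  // next key after an existing key
+assert!(map.next_key(&15) == option::some(20), 1);  // 15 is absent; smallest key above it
+assert!(map.prev_key(&10) == option::none(), 2);    // nothing before the smallest key
+```
+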
+ + + +## Function `lower_bound` + +Returns an iterator pointing to the first element that is greater or equal to the provided +key, or an end iterator if such element doesn't exist. + + +
public(friend) fun lower_bound<K, V>(self: &ordered_map::OrderedMap<K, V>, key: &K): ordered_map::IteratorPtr
+
+ + + +
+Implementation + + +
public(friend) fun lower_bound<K, V>(self: &OrderedMap<K, V>, key: &K): IteratorPtr {
+    let entries = &self.entries;
+    let len = entries.length();
+
+    let index = binary_search(key, entries, 0, len);
+    if (index == len) {
+        self.new_end_iter()
+    } else {
+        new_iter(index)
+    }
+}
+
+ + + +
+ + + +## Function `find` + +Returns an iterator pointing to the element that equals to the provided key, or an end +iterator if the key is not found. + + +
public(friend) fun find<K, V>(self: &ordered_map::OrderedMap<K, V>, key: &K): ordered_map::IteratorPtr
+
+ + + +
+Implementation + + +
public(friend) fun find<K, V>(self: &OrderedMap<K, V>, key: &K): IteratorPtr {
+    let lower_bound = self.lower_bound(key);
+    if (lower_bound.iter_is_end(self)) {
+        lower_bound
+    } else if (lower_bound.iter_borrow_key(self) == key) {
+        lower_bound
+    } else {
+        self.new_end_iter()
+    }
+}
+
+ + + +
+ + + +## Function `new_begin_iter` + +Returns the begin iterator. + + +
public(friend) fun new_begin_iter<K, V>(self: &ordered_map::OrderedMap<K, V>): ordered_map::IteratorPtr
+
+ + + +
+Implementation + + +
public(friend) fun new_begin_iter<K, V>(self: &OrderedMap<K, V>): IteratorPtr {
+    if (self.is_empty()) {
+        return IteratorPtr::End;
+    };
+
+    new_iter(0)
+}
+
+ + + +
+ + + +## Function `new_end_iter` + +Returns the end iterator. + + +
public(friend) fun new_end_iter<K, V>(self: &ordered_map::OrderedMap<K, V>): ordered_map::IteratorPtr
+
+ + + +
+Implementation + + +
public(friend) fun new_end_iter<K, V>(self: &OrderedMap<K, V>): IteratorPtr {
+    IteratorPtr::End
+}
+
+ + + +
+ + + +## Function `iter_next` + +Returns the next iterator, or none if already at the end iterator. +Note: Requires that the map is not changed after the input iterator is generated. + + +
public(friend) fun iter_next<K, V>(self: ordered_map::IteratorPtr, map: &ordered_map::OrderedMap<K, V>): ordered_map::IteratorPtr
+
+ + + +
+Implementation + + +
public(friend) fun iter_next<K, V>(self: IteratorPtr, map: &OrderedMap<K, V>): IteratorPtr {
+    assert!(!self.iter_is_end(map), error::invalid_argument(EITER_OUT_OF_BOUNDS));
+
+    let index = self.index + 1;
+    if (index < map.entries.length()) {
+        new_iter(index)
+    } else {
+        map.new_end_iter()
+    }
+}
+
+ + + +
+ + + +## Function `iter_prev` + +Returns the previous iterator, or none if already at the begin iterator. +Note: Requires that the map is not changed after the input iterator is generated. + + +
public(friend) fun iter_prev<K, V>(self: ordered_map::IteratorPtr, map: &ordered_map::OrderedMap<K, V>): ordered_map::IteratorPtr
+
+ + + +
+Implementation + + +
public(friend) fun iter_prev<K, V>(self: IteratorPtr, map: &OrderedMap<K, V>): IteratorPtr {
+    assert!(!self.iter_is_begin(map), error::invalid_argument(EITER_OUT_OF_BOUNDS));
+
+    let index = if (self is IteratorPtr::End) {
+        map.entries.length() - 1
+    } else {
+        self.index - 1
+    };
+
+    new_iter(index)
+}
+
+ + + +
+ + + +## Function `iter_is_begin` + +Returns whether the iterator is a begin iterator. + + +
public(friend) fun iter_is_begin<K, V>(self: &ordered_map::IteratorPtr, map: &ordered_map::OrderedMap<K, V>): bool
+
+ + + +
+Implementation + + +
public(friend) fun iter_is_begin<K, V>(self: &IteratorPtr, map: &OrderedMap<K, V>): bool {
+    if (self is IteratorPtr::End) {
+        map.is_empty()
+    } else {
+        self.index == 0
+    }
+}
+
+ + + +
+ + + +## Function `iter_is_begin_from_non_empty` + +Returns true iff the iterator is a begin iterator from a non-empty collection. +(I.e. if iterator points to a valid element) +This method doesn't require having access to map, unlike iter_is_begin. + + +
public(friend) fun iter_is_begin_from_non_empty(self: &ordered_map::IteratorPtr): bool
+
+ + + +
+Implementation + + +
public(friend) fun iter_is_begin_from_non_empty(self: &IteratorPtr): bool {
+    if (self is IteratorPtr::End) {
+        false
+    } else {
+        self.index == 0
+    }
+}
+
+ + + +
+ + + +## Function `iter_is_end` + +Returns whether the iterator is an end iterator. + + +
public(friend) fun iter_is_end<K, V>(self: &ordered_map::IteratorPtr, _map: &ordered_map::OrderedMap<K, V>): bool
+
+ + + +
+Implementation + + +
public(friend) fun iter_is_end<K, V>(self: &IteratorPtr, _map: &OrderedMap<K, V>): bool {
+    self is IteratorPtr::End
+}
+
+ + + +
+ + + +## Function `iter_borrow_key` + +Borrows the key given iterator points to. +Aborts with EITER_OUT_OF_BOUNDS if iterator is pointing to the end. +Note: Requires that the map is not changed after the input iterator is generated. + + +
public(friend) fun iter_borrow_key<K, V>(self: &ordered_map::IteratorPtr, map: &ordered_map::OrderedMap<K, V>): &K
+
+ + + +
+Implementation + + +
public(friend) fun iter_borrow_key<K, V>(self: &IteratorPtr, map: &OrderedMap<K, V>): &K {
+    assert!(!(self is IteratorPtr::End), error::invalid_argument(EITER_OUT_OF_BOUNDS));
+
+    &map.entries.borrow(self.index).key
+}
+
+ + + +
+ + + +## Function `iter_borrow` + +Borrows the value given iterator points to. +Aborts with EITER_OUT_OF_BOUNDS if iterator is pointing to the end. +Note: Requires that the map is not changed after the input iterator is generated. + + +
public(friend) fun iter_borrow<K, V>(self: ordered_map::IteratorPtr, map: &ordered_map::OrderedMap<K, V>): &V
+
+ + + +
+Implementation + + +
public(friend) fun iter_borrow<K, V>(self: IteratorPtr, map: &OrderedMap<K, V>): &V {
+    assert!(!(self is IteratorPtr::End), error::invalid_argument(EITER_OUT_OF_BOUNDS));
+    &map.entries.borrow(self.index).value
+}
+
+ + + +
+ + + +## Function `iter_borrow_mut` + +Mutably borrows the value iterator points to. +Aborts with EITER_OUT_OF_BOUNDS if iterator is pointing to the end. +Note: Requires that the map is not changed after the input iterator is generated. + + +
public(friend) fun iter_borrow_mut<K, V>(self: ordered_map::IteratorPtr, map: &mut ordered_map::OrderedMap<K, V>): &mut V
+
+ + + +
+Implementation + + +
public(friend) fun iter_borrow_mut<K, V>(self: IteratorPtr, map: &mut OrderedMap<K, V>): &mut V {
+    assert!(!(self is IteratorPtr::End), error::invalid_argument(EITER_OUT_OF_BOUNDS));
+    &mut map.entries.borrow_mut(self.index).value
+}
+
+ + + +
+ + + +## Function `iter_remove` + +Removes (key, value) pair iterator points to, returning the previous value. +Aborts with EITER_OUT_OF_BOUNDS if iterator is pointing to the end. +Note: Requires that the map is not changed after the input iterator is generated. + + +
public(friend) fun iter_remove<K: drop, V>(self: ordered_map::IteratorPtr, map: &mut ordered_map::OrderedMap<K, V>): V
+
+ + + +
+Implementation + + +
public(friend) fun iter_remove<K: drop, V>(self: IteratorPtr, map: &mut OrderedMap<K, V>): V {
+    assert!(!(self is IteratorPtr::End), error::invalid_argument(EITER_OUT_OF_BOUNDS));
+
+    let Entry { key: _, value } = map.entries.remove(self.index);
+    value
+}
+
+ + + +
+ + + +## Function `iter_replace` + +Replaces the value iterator is pointing to, returning the previous value. +Aborts with EITER_OUT_OF_BOUNDS if iterator is pointing to the end. +Note: Requires that the map is not changed after the input iterator is generated. + + +
public(friend) fun iter_replace<K: copy, drop, V>(self: ordered_map::IteratorPtr, map: &mut ordered_map::OrderedMap<K, V>, value: V): V
+
+ + + +
+Implementation + + +
public(friend) fun iter_replace<K: copy + drop, V>(self: IteratorPtr, map: &mut OrderedMap<K, V>, value: V): V {
+    assert!(!(self is IteratorPtr::End), error::invalid_argument(EITER_OUT_OF_BOUNDS));
+
+    // TODO once mem::replace is public/released, update to:
+    // let entry = map.entries.borrow_mut(self.index);
+    // mem::replace(&mut entry.value, value)
+    let key = map.entries[self.index].key;
+    let Entry {
+        key: _,
+        value: prev_value,
+    } = map.entries.replace(self.index, Entry { key, value });
+    prev_value
+}
+
+ + + +
+ + + +## Function `iter_add` + +Add key/value pair to the map, at the iterator position (before the element at the iterator position). +Aborts with ENEW_KEY_NOT_IN_ORDER if key is not larger than the key before the iterator, +or not smaller than the key at the iterator position. + + +
public(friend) fun iter_add<K, V>(self: ordered_map::IteratorPtr, map: &mut ordered_map::OrderedMap<K, V>, key: K, value: V)
+
+ + + +
+Implementation + + +
public(friend) fun iter_add<K, V>(self: IteratorPtr, map: &mut OrderedMap<K, V>, key: K, value: V) {
+    let len = map.entries.length();
+    let insert_index = if (self is IteratorPtr::End) {
+        len
+    } else {
+        self.index
+    };
+
+    if (insert_index > 0) {
+        assert!(cmp::compare(&map.entries[insert_index - 1].key, &key).is_lt(), error::invalid_argument(ENEW_KEY_NOT_IN_ORDER))
+    };
+
+    if (insert_index < len) {
+        assert!(cmp::compare(&key, &map.entries[insert_index].key).is_lt(), error::invalid_argument(ENEW_KEY_NOT_IN_ORDER))
+    };
+
+    map.entries.insert(insert_index, Entry { key, value });
+}
+
+ + + +
+ + + +## Function `destroy_empty` + +Destroys empty map. +Aborts if self is not empty. + + +
public fun destroy_empty<K, V>(self: ordered_map::OrderedMap<K, V>)
+
+ + + +
+Implementation + + +
public fun destroy_empty<K, V>(self: OrderedMap<K, V>) {
+    let OrderedMap::SortedVectorMap { entries } = self;
+    // assert!(entries.is_empty(), E_NOT_EMPTY);
+    entries.destroy_empty();
+}
+
+ + + +
+ + + +## Function `keys` + +Return all keys in the map. This requires keys to be copyable. + + +
public fun keys<K: copy, V>(self: &ordered_map::OrderedMap<K, V>): vector<K>
+
+ + + +
+Implementation + + +
public fun keys<K: copy, V>(self: &OrderedMap<K, V>): vector<K> {
+    self.entries.map_ref(|e| {
+        let e: &Entry<K, V> = e;
+        e.key
+    })
+}
+
+ + + +
+ + + +## Function `values` + +Return all values in the map. This requires values to be copyable. + + +
public fun values<K, V: copy>(self: &ordered_map::OrderedMap<K, V>): vector<V>
+
+ + + +
+Implementation + + +
public fun values<K, V: copy>(self: &OrderedMap<K, V>): vector<V> {
+    self.entries.map_ref(|e| {
+        let e: &Entry<K, V> = e;
+        e.value
+    })
+}
+
+ + + +
+ + + +## Function `to_vec_pair` + +Transform the map into two vectors with the keys and values respectively +Primarily used to destroy a map + + +
public fun to_vec_pair<K, V>(self: ordered_map::OrderedMap<K, V>): (vector<K>, vector<V>)
+
+ + + +
+Implementation + + +
public fun to_vec_pair<K, V>(self: OrderedMap<K, V>): (vector<K>, vector<V>) {
+    let keys: vector<K> = vector::empty();
+    let values: vector<V> = vector::empty();
+    let OrderedMap::SortedVectorMap { entries } = self;
+    entries.for_each(|e| {
+        let Entry { key, value } = e;
+        keys.push_back(key);
+        values.push_back(value);
+    });
+    (keys, values)
+}
+
+ + + +
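+A sketch showing that extraction follows ascending key order (illustrative only):
+
+```move
+let map = ordered_map::new_from(vector[2u64, 1], vector[20u64, 10]);
+let (keys, values) = map.to_vec_pair();
+assert!(keys == vector[1u64, 2], 0);
+assert!(values == vector[10u64, 20], 1);
+```
+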
+ + + +## Function `destroy` + +For maps that cannot be dropped this is a utility to destroy them +using lambdas to destroy the individual keys and values. + + +
public fun destroy<K, V>(self: ordered_map::OrderedMap<K, V>, dk: |K|, dv: |V|)
+
+ + + +
+Implementation + + +
public inline fun destroy<K, V>(
+    self: OrderedMap<K, V>,
+    dk: |K|,
+    dv: |V|
+) {
+    let (keys, values) = self.to_vec_pair();
+    keys.destroy(|_k| dk(_k));
+    values.destroy(|_v| dv(_v));
+}
+
+ + + +
+ + + +## Function `for_each` + +Apply the function to each key-value pair in the map, consuming it. + + +
public fun for_each<K, V>(self: ordered_map::OrderedMap<K, V>, f: |(K, V)|)
+
+ + + +
+Implementation + + +
public inline fun for_each<K, V>(
+    self: OrderedMap<K, V>,
+    f: |K, V|
+) {
+    let (keys, values) = self.to_vec_pair();
+    keys.zip(values, |k, v| f(k, v));
+}
+
+ + + +
+ + + +## Function `for_each_ref` + +Apply the function to a reference of each key-value pair in the map. + +The current implementation is O(n * log(n)); once function values are supported, +it will be optimized to O(n). + + +
public fun for_each_ref<K: copy, drop, V>(self: &ordered_map::OrderedMap<K, V>, f: |(&K, &V)|)
+
+ + + +
+Implementation + + +
public inline fun for_each_ref<K: copy + drop, V>(self: &OrderedMap<K, V>, f: |&K, &V|) {
+    // This implementation is inefficient: O(log(n)) for next_key / borrow lookups every time,
+    // but is the only one available through the public API.
+    if (!self.is_empty()) {
+        let (k, v) = self.borrow_front();
+        f(k, v);
+
+        let cur_k = self.next_key(k);
+        while (cur_k.is_some()) {
+            let k = cur_k.destroy_some();
+            f(&k, self.borrow(&k));
+
+            cur_k = self.next_key(&k);
+        };
+    };
+
+    // TODO: if we make iterator api public update to:
+    // let iter = self.new_begin_iter();
+    // while (!iter.iter_is_end(self)) {
+    //     f(iter.iter_borrow_key(self), iter.iter_borrow(self));
+    //     iter = iter.iter_next(self);
+    // }
+
+    // TODO: once move supports private functions update to:
+    // vector::for_each_ref(
+    //     &self.entries,
+    //     |entry| {
+    //         f(&entry.key, &entry.value)
+    //     }
+    // );
+}
+
+ + + +
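+A sketch of folding over the map without consuming it (illustrative only; assumes the
+usual pattern of mutating a captured local inside an inline-function lambda):
+
+```move
+let map = ordered_map::new_from(vector[1u64, 2, 3], vector[10u64, 20, 30]);
+let total = 0u64;
+map.for_each_ref(|_k, v| total = total + *v);
+assert!(total == 60, 0);
+```
+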
+ + + +## Function `for_each_ref_friend` + + + +
public(friend) fun for_each_ref_friend<K: copy, drop, V>(self: &ordered_map::OrderedMap<K, V>, f: |(&K, &V)|)
+
+ + + +
+Implementation + + +
public(friend) inline fun for_each_ref_friend<K: copy + drop, V>(self: &OrderedMap<K, V>, f: |&K, &V|) {
+    let iter = self.new_begin_iter();
+    while (!iter.iter_is_end(self)) {
+        f(iter.iter_borrow_key(self), iter.iter_borrow(self));
+        iter = iter.iter_next(self);
+    }
+}
+
+ + + +
+ + + +## Function `for_each_mut` + +Apply the function to a mutable reference of each key-value pair in the map. + +The current implementation is O(n * log(n)); once function values are supported, +it will be optimized to O(n). + + +
public fun for_each_mut<K: copy, drop, V>(self: &mut ordered_map::OrderedMap<K, V>, f: |(&K, &mut V)|)
+
+ + + +
+Implementation + + +
public inline fun for_each_mut<K: copy + drop, V>(self: &mut OrderedMap<K, V>, f: |&K, &mut V|) {
+    // This implementation is inefficient: O(log(n)) for next_key / borrow lookups every time,
+    // but is the only one available through the public API.
+    if (!self.is_empty()) {
+        let (k, _v) = self.borrow_front();
+
+        let k = *k;
+        let done = false;
+        while (!done) {
+            f(&k, self.borrow_mut(&k));
+
+            let cur_k = self.next_key(&k);
+            if (cur_k.is_some()) {
+                k = cur_k.destroy_some();
+            } else {
+                done = true;
+            }
+        };
+    };
+
+    // TODO: if we make iterator api public update to:
+    // let iter = self.new_begin_iter();
+    // while (!iter.iter_is_end(self)) {
+    //     let key = *iter.iter_borrow_key(self);
+    //     f(key, iter.iter_borrow_mut(self));
+    //     iter = iter.iter_next(self);
+    // }
+
+    // TODO: once move supports private functions update to:
+    // vector::for_each_mut(
+    //     &mut self.entries,
+    //     |entry| {
+    //         f(&mut entry.key, &mut entry.value)
+    //     }
+    // );
+}
+
+ + + +
+ + + +## Function `new_iter` + + + +
fun new_iter(index: u64): ordered_map::IteratorPtr
+
+ + + +
+Implementation + + +
inline fun new_iter(index: u64): IteratorPtr {
+    IteratorPtr::Position {
+        index: index,
+    }
+}
+
+ + + +
+ + + +## Function `binary_search` + + + +
fun binary_search<K, V>(key: &K, entries: &vector<ordered_map::Entry<K, V>>, start: u64, end: u64): u64
+
+ + + +
+Implementation + + +
fun binary_search<K, V>(key: &K, entries: &vector<Entry<K, V>>, start: u64, end: u64): u64 {
+    let l = start;
+    let r = end;
+    while (l != r) {
+        let mid = l + ((r - l) >> 1);
+        let comparison = cmp::compare(&entries.borrow(mid).key, key);
+        if (comparison.is_lt()) {
+            l = mid + 1;
+        } else {
+            r = mid;
+        };
+    };
+    l
+}
+
+ + + +
+ + + +## Specification + + + +
pragma verify = false;
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/aptos-framework/doc/overview.md b/aptos-move/framework/aptos-framework/doc/overview.md index 2605092ef61f5..0316e89d82635 100644 --- a/aptos-move/framework/aptos-framework/doc/overview.md +++ b/aptos-move/framework/aptos-framework/doc/overview.md @@ -13,29 +13,29 @@ This is the reference documentation of the Aptos framework. - [`0x1::account`](account.md#0x1_account) +- [`0x1::account_abstraction`](account_abstraction.md#0x1_account_abstraction) - [`0x1::aggregator`](aggregator.md#0x1_aggregator) - [`0x1::aggregator_factory`](aggregator_factory.md#0x1_aggregator_factory) - [`0x1::aggregator_v2`](aggregator_v2.md#0x1_aggregator_v2) - [`0x1::aptos_account`](aptos_account.md#0x1_aptos_account) - [`0x1::aptos_coin`](aptos_coin.md#0x1_aptos_coin) - [`0x1::aptos_governance`](aptos_governance.md#0x1_aptos_governance) -- [`0x1::atomic_bridge`](atomic_bridge.md#0x1_atomic_bridge) -- [`0x1::atomic_bridge_configuration`](atomic_bridge.md#0x1_atomic_bridge_configuration) -- [`0x1::atomic_bridge_counterparty`](atomic_bridge.md#0x1_atomic_bridge_counterparty) -- [`0x1::atomic_bridge_initiator`](atomic_bridge.md#0x1_atomic_bridge_initiator) -- [`0x1::atomic_bridge_store`](atomic_bridge.md#0x1_atomic_bridge_store) +- [`0x1::auth_data`](auth_data.md#0x1_auth_data) +- [`0x1::base16`](base16.md#0x1_base16) +- [`0x1::big_ordered_map`](big_ordered_map.md#0x1_big_ordered_map) - [`0x1::block`](block.md#0x1_block) - [`0x1::chain_id`](chain_id.md#0x1_chain_id) - [`0x1::chain_status`](chain_status.md#0x1_chain_status) - [`0x1::code`](code.md#0x1_code) - [`0x1::coin`](coin.md#0x1_coin) +- [`0x1::common_account_abstractions_utils`](common_account_abstractions_utils.md#0x1_common_account_abstractions_utils) - [`0x1::config_buffer`](config_buffer.md#0x1_config_buffer) - [`0x1::consensus_config`](consensus_config.md#0x1_consensus_config) - [`0x1::create_signer`](create_signer.md#0x1_create_signer) - [`0x1::delegation_pool`](delegation_pool.md#0x1_delegation_pool) - [`0x1::dispatchable_fungible_asset`](dispatchable_fungible_asset.md#0x1_dispatchable_fungible_asset) - [`0x1::dkg`](dkg.md#0x1_dkg) -- [`0x1::ethereum`](ethereum.md#0x1_ethereum) +- [`0x1::ethereum_derivable_account`](ethereum_derivable_account.md#0x1_ethereum_derivable_account) - [`0x1::event`](event.md#0x1_event) - [`0x1::execution_config`](execution_config.md#0x1_execution_config) - [`0x1::function_info`](function_info.md#0x1_function_info) @@ -43,26 +43,30 @@ This is the reference documentation of the Aptos framework. 
- [`0x1::gas_schedule`](gas_schedule.md#0x1_gas_schedule) - [`0x1::genesis`](genesis.md#0x1_genesis) - [`0x1::governance_proposal`](governance_proposal.md#0x1_governance_proposal) -- [`0x1::governed_gas_pool`](governed_gas_pool.md#0x1_governed_gas_pool) - [`0x1::guid`](guid.md#0x1_guid) - [`0x1::jwk_consensus_config`](jwk_consensus_config.md#0x1_jwk_consensus_config) - [`0x1::jwks`](jwks.md#0x1_jwks) - [`0x1::keyless_account`](keyless_account.md#0x1_keyless_account) - [`0x1::managed_coin`](managed_coin.md#0x1_managed_coin) - [`0x1::multisig_account`](multisig_account.md#0x1_multisig_account) -- [`0x1::native_bridge`](native_bridge.md#0x1_native_bridge) +- [`0x1::nonce_validation`](nonce_validation.md#0x1_nonce_validation) - [`0x1::object`](object.md#0x1_object) - [`0x1::object_code_deployment`](object_code_deployment.md#0x1_object_code_deployment) - [`0x1::optional_aggregator`](optional_aggregator.md#0x1_optional_aggregator) +- [`0x1::ordered_map`](ordered_map.md#0x1_ordered_map) +- [`0x1::permissioned_delegation`](permissioned_delegation.md#0x1_permissioned_delegation) +- [`0x1::permissioned_signer`](permissioned_signer.md#0x1_permissioned_signer) - [`0x1::primary_fungible_store`](primary_fungible_store.md#0x1_primary_fungible_store) - [`0x1::randomness`](randomness.md#0x1_randomness) - [`0x1::randomness_api_v0_config`](randomness_api_v0_config.md#0x1_randomness_api_v0_config) - [`0x1::randomness_config`](randomness_config.md#0x1_randomness_config) - [`0x1::randomness_config_seqnum`](randomness_config_seqnum.md#0x1_randomness_config_seqnum) +- [`0x1::rate_limiter`](rate_limiter.md#0x1_rate_limiter) - [`0x1::reconfiguration`](reconfiguration.md#0x1_reconfiguration) - [`0x1::reconfiguration_state`](reconfiguration_state.md#0x1_reconfiguration_state) - [`0x1::reconfiguration_with_dkg`](reconfiguration_with_dkg.md#0x1_reconfiguration_with_dkg) - [`0x1::resource_account`](resource_account.md#0x1_resource_account) +- [`0x1::solana_derivable_account`](solana_derivable_account.md#0x1_solana_derivable_account) - [`0x1::stake`](stake.md#0x1_stake) - [`0x1::staking_config`](staking_config.md#0x1_staking_config) - [`0x1::staking_contract`](staking_contract.md#0x1_staking_contract) diff --git a/aptos-move/framework/aptos-framework/doc/permissioned_delegation.md b/aptos-move/framework/aptos-framework/doc/permissioned_delegation.md new file mode 100644 index 0000000000000..75922a2bca965 --- /dev/null +++ b/aptos-move/framework/aptos-framework/doc/permissioned_delegation.md @@ -0,0 +1,484 @@ + + + +# Module `0x1::permissioned_delegation` + + + +- [Enum `AccountDelegation`](#0x1_permissioned_delegation_AccountDelegation) +- [Enum `DelegationKey`](#0x1_permissioned_delegation_DelegationKey) +- [Resource `RegisteredDelegations`](#0x1_permissioned_delegation_RegisteredDelegations) +- [Constants](#@Constants_0) +- [Function `gen_ed25519_key`](#0x1_permissioned_delegation_gen_ed25519_key) +- [Function `check_txn_rate`](#0x1_permissioned_delegation_check_txn_rate) +- [Function `add_permissioned_handle`](#0x1_permissioned_delegation_add_permissioned_handle) +- [Function `remove_permissioned_handle`](#0x1_permissioned_delegation_remove_permissioned_handle) +- [Function `permissioned_signer_by_key`](#0x1_permissioned_delegation_permissioned_signer_by_key) +- [Function `handle_address_by_key`](#0x1_permissioned_delegation_handle_address_by_key) +- [Function `authenticate`](#0x1_permissioned_delegation_authenticate) +- [Function 
`get_storable_permissioned_handle`](#0x1_permissioned_delegation_get_storable_permissioned_handle) +- [Specification](#@Specification_1) + + +
use 0x1::auth_data;
+use 0x1::bcs_stream;
+use 0x1::big_ordered_map;
+use 0x1::ed25519;
+use 0x1::error;
+use 0x1::option;
+use 0x1::permissioned_signer;
+use 0x1::rate_limiter;
+use 0x1::signer;
+
+ + + + + +## Enum `AccountDelegation` + + + +
enum AccountDelegation has store
+
+ + + +
+Variants + + +
+V1 + + +
+Fields + + +
+
+handle: permissioned_signer::StorablePermissionedHandle +
+
+ +
+
+rate_limiter: option::Option<rate_limiter::RateLimiter> +
+
+ +
+
+ + +
+ +
+ +
+ + + +## Enum `DelegationKey` + + + +
enum DelegationKey has copy, drop, store
+
+ + + +
+Variants + + +
+Ed25519PublicKey + + +
+Fields + + +
+
+0: ed25519::UnvalidatedPublicKey +
+
+ +
+
+ + +
+ +
+ +
+ + + +## Resource `RegisteredDelegations` + + + +
struct RegisteredDelegations has key
+
+ + + +
+Fields + + +
+
+delegations: big_ordered_map::BigOrderedMap<permissioned_delegation::DelegationKey, permissioned_delegation::AccountDelegation> +
+
+ +
+
+ + +
+ + + +## Constants + + + + + + +
const ENOT_MASTER_SIGNER: u64 = 1;
+
+ + + + + + + +
const EINVALID_PUBLIC_KEY: u64 = 2;
+
+ + + + + + + +
const EINVALID_SIGNATURE: u64 = 4;
+
+ + + + + + + +
const EDELEGATION_EXISTENCE: u64 = 5;
+
+ + + + + + + +
const EPUBLIC_KEY_NOT_FOUND: u64 = 3;
+
+ + + + + + + +
const ERATE_LIMITED: u64 = 6;
+
+ + + + + +## Function `gen_ed25519_key` + + + +
public fun gen_ed25519_key(key: ed25519::UnvalidatedPublicKey): permissioned_delegation::DelegationKey
+
+ + + +
+Implementation + + +
public fun gen_ed25519_key(key: UnvalidatedPublicKey): DelegationKey {
+    DelegationKey::Ed25519PublicKey(key)
+}
+
+ + + +
+ + + +## Function `check_txn_rate` + + + +
fun check_txn_rate(bundle: &mut permissioned_delegation::AccountDelegation, check_rate_limit: bool)
+
+ + + +
+Implementation + + +
inline fun check_txn_rate(bundle: &mut AccountDelegation, check_rate_limit: bool) {
+    let token_bucket = &mut bundle.rate_limiter;
+    if (check_rate_limit && token_bucket.is_some()) {
+        assert!(rate_limiter::request(token_bucket.borrow_mut(), 1), std::error::permission_denied(ERATE_LIMITED));
+    };
+}
+
+ + + +
+ + + +## Function `add_permissioned_handle` + + + +
public fun add_permissioned_handle(master: &signer, key: permissioned_delegation::DelegationKey, rate_limiter: option::Option<rate_limiter::RateLimiter>, expiration_time: u64): signer
+
+ + + +
+Implementation + + +
public fun add_permissioned_handle(
+    master: &signer,
+    key: DelegationKey,
+    rate_limiter: Option<RateLimiter>,
+    expiration_time: u64,
+): signer acquires RegisteredDelegations {
+    assert!(!is_permissioned_signer(master), error::permission_denied(ENOT_MASTER_SIGNER));
+    let addr = signer::address_of(master);
+    if (!exists<RegisteredDelegations>(addr)) {
+        move_to(master, RegisteredDelegations {
+            delegations: big_ordered_map::new_with_config(50, 20, false)
+        });
+    };
+    let handles = &mut RegisteredDelegations[addr].delegations;
+    assert!(!handles.contains(&key), error::already_exists(EDELEGATION_EXISTENCE));
+    let handle = permissioned_signer::create_storable_permissioned_handle(master, expiration_time);
+    let permissioned_signer = permissioned_signer::signer_from_storable_permissioned_handle(&handle);
+    handles.add(key, AccountDelegation::V1 { handle, rate_limiter });
+    permissioned_signer
+}
+
+ + + +
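+A sketch of registering a delegated key (illustrative only; `pk_bytes` is assumed to be a
+32-byte Ed25519 public key obtained off-chain, `master` the master account's signer, and
+`expiration_time` the expiration timestamp expected by the handle):
+
+```move
+// No rate limit is attached here; pass option::some(<RateLimiter>) to throttle usage.
+let key = permissioned_delegation::gen_ed25519_key(
+    ed25519::new_unvalidated_public_key_from_bytes(pk_bytes)
+);
+let delegated = permissioned_delegation::add_permissioned_handle(
+    master, key, option::none(), expiration_time
+);
+```
+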
+ + + +## Function `remove_permissioned_handle` + + + +
public fun remove_permissioned_handle(master: &signer, key: permissioned_delegation::DelegationKey)
+
+ + + +
+Implementation + + +
public fun remove_permissioned_handle(
+    master: &signer,
+    key: DelegationKey,
+) acquires RegisteredDelegations {
+    assert!(!is_permissioned_signer(master), error::permission_denied(ENOT_MASTER_SIGNER));
+    let addr = signer::address_of(master);
+    let delegations = &mut RegisteredDelegations[addr].delegations;
+    assert!(delegations.contains(&key), error::not_found(EDELEGATION_EXISTENCE));
+    let delegation = delegations.remove(&key);
+    match (delegation) {
+        AccountDelegation::V1 { handle, rate_limiter: _ } => {
+            permissioned_signer::destroy_storable_permissioned_handle(handle);
+        }
+    };
+}
+
+ + + +
+ + + +## Function `permissioned_signer_by_key` + + + +
public fun permissioned_signer_by_key(master: &signer, key: permissioned_delegation::DelegationKey): signer
+
+ + + +
+Implementation + + +
public fun permissioned_signer_by_key(
+    master: &signer,
+    key: DelegationKey,
+): signer acquires RegisteredDelegations {
+    assert!(!is_permissioned_signer(master), error::permission_denied(ENOT_MASTER_SIGNER));
+    let addr = signer::address_of(master);
+    let handle = get_storable_permissioned_handle(addr, key, false);
+    permissioned_signer::signer_from_storable_permissioned_handle(handle)
+}
+
+ + + +
+ + + +## Function `handle_address_by_key` + + + +
public fun handle_address_by_key(master: address, key: permissioned_delegation::DelegationKey): address
+
+ + + +
+Implementation + + +
public fun handle_address_by_key(master: address, key: DelegationKey): address acquires RegisteredDelegations {
+    let handle = get_storable_permissioned_handle(master, key, false);
+    permissioned_signer::permissions_storage_address(handle)
+}
+
+ + + +
+ + + +## Function `authenticate` + +Authorization function for account abstraction. + + +
public fun authenticate(account: signer, abstraction_auth_data: auth_data::AbstractionAuthData): signer
+
+ + + +
+Implementation + + +
public fun authenticate(
+    account: signer,
+    abstraction_auth_data: AbstractionAuthData
+): signer acquires RegisteredDelegations {
+    let addr = signer::address_of(&account);
+    let stream = bcs_stream::new(*auth_data::authenticator(&abstraction_auth_data));
+    let public_key = new_unvalidated_public_key_from_bytes(
+        bcs_stream::deserialize_vector<u8>(&mut stream, |x| deserialize_u8(x))
+    );
+    let signature = new_signature_from_bytes(
+        bcs_stream::deserialize_vector<u8>(&mut stream, |x| deserialize_u8(x))
+    );
+    assert!(
+        ed25519::signature_verify_strict(
+            &signature,
+            &public_key,
+            *auth_data::digest(&abstraction_auth_data),
+        ),
+        error::permission_denied(EINVALID_SIGNATURE)
+    );
+    let handle = get_storable_permissioned_handle(addr, DelegationKey::Ed25519PublicKey(public_key), true);
+    permissioned_signer::signer_from_storable_permissioned_handle(handle)
+}
+
+ + + +
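+A sketch of the expected authenticator layout, inferred from the deserialization above
+(an assumption, not a normative spec): the BCS encoding of the public-key bytes followed
+by the BCS encoding of the signature bytes. `pk_bytes` and `sig_bytes` are hypothetical
+off-chain values.
+
+```move
+// Hypothetical construction of the authenticator consumed by `authenticate`.
+let authenticator = bcs::to_bytes(&pk_bytes);      // vector<u8>: 32-byte Ed25519 key
+authenticator.append(bcs::to_bytes(&sig_bytes));   // vector<u8>: 64-byte signature
+```
+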
+ + + +## Function `get_storable_permissioned_handle` + + + +
fun get_storable_permissioned_handle(master: address, key: permissioned_delegation::DelegationKey, count_rate: bool): &permissioned_signer::StorablePermissionedHandle
+
+ + + +
+Implementation + + +
inline fun get_storable_permissioned_handle(
+    master: address,
+    key: DelegationKey,
+    count_rate: bool
+): &StorablePermissionedHandle {
+    if (exists<RegisteredDelegations>(master)) {
+        let delegations = &mut RegisteredDelegations[master].delegations;
+        if (delegations.contains(&key)) {
+            let delegation = delegations.remove(&key);
+            check_txn_rate(&mut delegation, count_rate);
+            delegations.add(key, delegation);
+            &delegations.borrow(&key).handle
+        } else {
+            abort error::permission_denied(EINVALID_SIGNATURE)
+        }
+    } else {
+        abort error::permission_denied(EINVALID_SIGNATURE)
+    }
+}
+
+ + + +
+ + + +## Specification + + + +
pragma verify = false;
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/aptos-framework/doc/permissioned_signer.md b/aptos-move/framework/aptos-framework/doc/permissioned_signer.md new file mode 100644 index 0000000000000..ab80381d257ba --- /dev/null +++ b/aptos-move/framework/aptos-framework/doc/permissioned_signer.md @@ -0,0 +1,2039 @@ + + + +# Module `0x1::permissioned_signer` + +A _permissioned signer_ consists of a pair of the original signer and a generated +address which is used to store information about associated permissions. + +A permissioned signer is a restricted version of a signer. Functions move_to and +address_of behave the same, and can be passed wherever signer is needed. However, +code can internally query for the permissions to assert additional restrictions on +the use of the signer. + +A client which is interested in restricting access granted via a signer can create a permissioned signer +and pass on to other existing code without changes to existing APIs. Core functions in the framework, for +example account functions, can then assert availability of permissions, effectively restricting +existing code in a compatible way. + +After introducing the core functionality, examples are provided for withdraw limit on accounts, and +for blind signing. + + +- [Struct `RevokePermissionHandlePermission`](#0x1_permissioned_signer_RevokePermissionHandlePermission) +- [Resource `GrantedPermissionHandles`](#0x1_permissioned_signer_GrantedPermissionHandles) +- [Enum `PermissionedHandle`](#0x1_permissioned_signer_PermissionedHandle) +- [Enum `StorablePermissionedHandle`](#0x1_permissioned_signer_StorablePermissionedHandle) +- [Enum Resource `PermissionStorage`](#0x1_permissioned_signer_PermissionStorage) +- [Enum `StoredPermission`](#0x1_permissioned_signer_StoredPermission) +- [Constants](#@Constants_0) +- [Function `create_permissioned_handle`](#0x1_permissioned_signer_create_permissioned_handle) +- [Function `destroy_permissioned_handle`](#0x1_permissioned_signer_destroy_permissioned_handle) +- [Function `signer_from_permissioned_handle`](#0x1_permissioned_signer_signer_from_permissioned_handle) +- [Function `is_permissioned_signer`](#0x1_permissioned_signer_is_permissioned_signer) +- [Function `grant_revoke_permission`](#0x1_permissioned_signer_grant_revoke_permission) +- [Function `revoke_permission_storage_address`](#0x1_permissioned_signer_revoke_permission_storage_address) +- [Function `revoke_all_handles`](#0x1_permissioned_signer_revoke_all_handles) +- [Function `initialize_permission_address`](#0x1_permissioned_signer_initialize_permission_address) +- [Function `create_storable_permissioned_handle`](#0x1_permissioned_signer_create_storable_permissioned_handle) +- [Function `destroy_storable_permissioned_handle`](#0x1_permissioned_signer_destroy_storable_permissioned_handle) +- [Function `destroy_permissions_storage_address`](#0x1_permissioned_signer_destroy_permissions_storage_address) +- [Function `signer_from_storable_permissioned_handle`](#0x1_permissioned_signer_signer_from_storable_permissioned_handle) +- [Function `permissions_storage_address`](#0x1_permissioned_signer_permissions_storage_address) +- [Function `assert_master_signer`](#0x1_permissioned_signer_assert_master_signer) +- [Function `is_above`](#0x1_permissioned_signer_is_above) +- [Function `consume_capacity`](#0x1_permissioned_signer_consume_capacity) +- [Function `increase_capacity`](#0x1_permissioned_signer_increase_capacity) +- [Function `merge`](#0x1_permissioned_signer_merge) +- 
[Function `map_or`](#0x1_permissioned_signer_map_or) +- [Function `insert_or`](#0x1_permissioned_signer_insert_or) +- [Function `authorize_increase`](#0x1_permissioned_signer_authorize_increase) +- [Function `authorize_unlimited`](#0x1_permissioned_signer_authorize_unlimited) +- [Function `grant_unlimited_with_permissioned_signer`](#0x1_permissioned_signer_grant_unlimited_with_permissioned_signer) +- [Function `increase_limit`](#0x1_permissioned_signer_increase_limit) +- [Function `check_permission_exists`](#0x1_permissioned_signer_check_permission_exists) +- [Function `check_permission_capacity_above`](#0x1_permissioned_signer_check_permission_capacity_above) +- [Function `check_permission_consume`](#0x1_permissioned_signer_check_permission_consume) +- [Function `capacity`](#0x1_permissioned_signer_capacity) +- [Function `revoke_permission`](#0x1_permissioned_signer_revoke_permission) +- [Function `address_of`](#0x1_permissioned_signer_address_of) +- [Function `borrow_address`](#0x1_permissioned_signer_borrow_address) +- [Function `is_permissioned_signer_impl`](#0x1_permissioned_signer_is_permissioned_signer_impl) +- [Function `permission_address`](#0x1_permissioned_signer_permission_address) +- [Function `signer_from_permissioned_handle_impl`](#0x1_permissioned_signer_signer_from_permissioned_handle_impl) +- [Specification](#@Specification_1) + - [Function `create_permissioned_handle`](#@Specification_1_create_permissioned_handle) + - [Function `destroy_permissioned_handle`](#@Specification_1_destroy_permissioned_handle) + - [Function `is_permissioned_signer`](#@Specification_1_is_permissioned_signer) + - [Function `revoke_permission_storage_address`](#@Specification_1_revoke_permission_storage_address) + - [Function `create_storable_permissioned_handle`](#@Specification_1_create_storable_permissioned_handle) + - [Function `destroy_storable_permissioned_handle`](#@Specification_1_destroy_storable_permissioned_handle) + - [Function `authorize_increase`](#@Specification_1_authorize_increase) + - [Function `check_permission_exists`](#@Specification_1_check_permission_exists) + - [Function `check_permission_capacity_above`](#@Specification_1_check_permission_capacity_above) + - [Function `check_permission_consume`](#@Specification_1_check_permission_consume) + - [Function `capacity`](#@Specification_1_capacity) + - [Function `is_permissioned_signer_impl`](#@Specification_1_is_permissioned_signer_impl) + - [Function `permission_address`](#@Specification_1_permission_address) + - [Function `signer_from_permissioned_handle_impl`](#@Specification_1_signer_from_permissioned_handle_impl) + + +
use 0x1::big_ordered_map;
+use 0x1::copyable_any;
+use 0x1::create_signer;
+use 0x1::error;
+use 0x1::features;
+use 0x1::option;
+use 0x1::signer;
+use 0x1::timestamp;
+use 0x1::transaction_context;
+use 0x1::vector;
+
+ + + + + +## Struct `RevokePermissionHandlePermission` + +If a permissioned signer has this permission, it would be able to revoke other granted +permission handles in the same signer. + + +
struct RevokePermissionHandlePermission has copy, drop, store
+
+ + + +
+Fields + + +
+
+dummy_field: bool +
+
+ +
+
+ + +
+ + + +## Resource `GrantedPermissionHandles` + +Stores the list of granted permission handles for a given account. + + +
struct GrantedPermissionHandles has key
+
+ + + +
+Fields + + +
+
+active_handles: vector<address> +
+
+ Each address refers to a permissions_storage_addr that stores the PermissionStorage. +
+
+ + +
+ + + +## Enum `PermissionedHandle` + +An ephemeral permission handle that can be used to generate a permissioned signer with permission +configuration stored within. + + +
enum PermissionedHandle
+
+ + + +
+Variants + + +
+V1 + + +
+Fields + + +
+
+master_account_addr: address +
+
+ Address of the signer that creates this handle. +
+
+permissions_storage_addr: address +
+
+ Address that stores PermissionStorage. +
+
+ + +
+ +
+ +
+ + + +## Enum `StorablePermissionedHandle` + +A permission handle that can be used to generate a permissioned signer. + +This handle is storable and thus should be treated very carefully, as it serves a similar +function to signer delegation. + + +
enum StorablePermissionedHandle has store
+
+ + + +
+Variants + + +
+V1 + + +
+Fields + + +
+
+master_account_addr: address +
+
+ Address of the signer that creates this handle. +
+
+permissions_storage_addr: address +
+
+ Address that stores PermissionStorage. +
+
+expiration_time: u64 +
+
+ Permissioned signer can no longer be generated from this handle after expiration_time. +
+
+ + +
+ +
+ +
+ + + +## Enum Resource `PermissionStorage` + +The actual permission configuration stored on-chain. + +The address that holds PermissionStorage will be generated freshly every time a permission +handle gets created. + + +
enum PermissionStorage has key
+
+ + + +
+Variants + + +
+V1 + + +
+Fields + + +
+
+perms: big_ordered_map::BigOrderedMap<copyable_any::Any, permissioned_signer::StoredPermission> +
+
+ A heterogeneous map from Permission structs defined by different modules to + its permission capacity. +
+
+ + +
+ +
+ +
+ + + +## Enum `StoredPermission` + +Types of permission capacity stored on chain. + + +
enum StoredPermission has copy, drop, store
+
+ + + +
+Variants + + +
+Unlimited + + +
+Fields + + +
+
+ + +
+ +
+ +
+Capacity + + +
+Fields + + +
+
+0: u256 +
+
+ +
+
+ + +
+ +
+ +
+ + + +## Constants + + + + +Cannot authorize a permission. + + +
const ECANNOT_AUTHORIZE: u64 = 2;
+
+ + + + + +signer doesn't have enough capacity to extract permission. + + +
const ECANNOT_EXTRACT_PERMISSION: u64 = 4;
+
+ + + + + +Trying to grant permission using non-master signer. + + +
const ENOT_MASTER_SIGNER: u64 = 1;
+
+ + + + + +Access permission information from a master signer. + + +
const ENOT_PERMISSIONED_SIGNER: u64 = 3;
+
+ + + + + +Permissioned signer feature is not activated. + + +
const EPERMISSION_SIGNER_DISABLED: u64 = 9;
+
+ + + + + +destroying permission handle that has already been revoked or not owned by the +given master signer. + + +
const E_NOT_ACTIVE: u64 = 8;
+
+ + + + + +permission handle has expired. + + +
const E_PERMISSION_EXPIRED: u64 = 5;
+
+ + + + + +storing extracted permission into a different signer. + + +
const E_PERMISSION_MISMATCH: u64 = 6;
+
+ + + + + +permission handle has been revoked by the original signer. + + +
const E_PERMISSION_REVOKED: u64 = 7;
+
+ + + + + + + +
const U256_MAX: u256 = 115792089237316195423570985008687907853269984665640564039457584007913129639935;
+
+ + + + + +## Function `create_permissioned_handle` + +Create an ephemeral permission handle based on the master signer. + +This handle can be used to derive a signer that can be used in the context of +the current transaction. + + +
public fun create_permissioned_handle(master: &signer): permissioned_signer::PermissionedHandle
+
+ + + +
+Implementation + + +
public fun create_permissioned_handle(master: &signer): PermissionedHandle {
+    assert!(
+        features::is_permissioned_signer_enabled(),
+        error::permission_denied(EPERMISSION_SIGNER_DISABLED)
+    );
+
+    assert_master_signer(master);
+    let permissions_storage_addr = generate_auid_address();
+    let master_account_addr = signer::address_of(master);
+
+    initialize_permission_address(permissions_storage_addr);
+
+    PermissionedHandle::V1 { master_account_addr, permissions_storage_addr }
+}
+
+ + + +
+ + + +## Function `destroy_permissioned_handle` + +Destroys an ephemeral permission handle and cleans up the permissions stored in that handle. + + +
public fun destroy_permissioned_handle(p: permissioned_signer::PermissionedHandle)
+
+ + + +
+Implementation + + +
public fun destroy_permissioned_handle(p: PermissionedHandle) acquires PermissionStorage {
+    assert!(
+        features::is_permissioned_signer_enabled(),
+        error::permission_denied(EPERMISSION_SIGNER_DISABLED)
+    );
+    let PermissionedHandle::V1 { master_account_addr: _, permissions_storage_addr } =
+        p;
+    destroy_permissions_storage_address(permissions_storage_addr);
+}
+
+ + + +
+ + + +## Function `signer_from_permissioned_handle` + +Generate the permissioned signer based on the ephemeral permission handle. + +This signer can be used as a regular signer for other smart contracts. However, when such a +signer interacts with various framework functions, it is subject to permission checks +and will abort if a check fails. + + +
public fun signer_from_permissioned_handle(p: &permissioned_signer::PermissionedHandle): signer
+
+ + + +
+Implementation + + +
public fun signer_from_permissioned_handle(p: &PermissionedHandle): signer {
+    assert!(
+        features::is_permissioned_signer_enabled(),
+        error::permission_denied(EPERMISSION_SIGNER_DISABLED)
+    );
+    signer_from_permissioned_handle_impl(
+        p.master_account_addr, p.permissions_storage_addr
+    )
+}
+
+ + + +
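+To make the lifecycle concrete, here is a minimal usage sketch. The `example` module, its address, and the entry function are hypothetical; only the three `permissioned_signer` calls are taken from this module.
+
+```move
+module example::permissioned_flow {
+    use aptos_framework::permissioned_signer;
+
+    /// Hypothetical entry point: derive a transaction-scoped permissioned signer
+    /// from the master signer and clean it up before returning.
+    public entry fun run(master: &signer) {
+        // Create an ephemeral handle; aborts if `master` is itself a permissioned signer.
+        let handle = permissioned_signer::create_permissioned_handle(master);
+
+        // Derive the restricted signer; framework calls made with it are subject to
+        // the permissions stored under the handle's storage address.
+        let perm_signer = permissioned_signer::signer_from_permissioned_handle(&handle);
+
+        // ... pass `perm_signer` to other modules here ...
+        let _ = perm_signer;
+
+        // Destroy the handle and the permission storage it owns.
+        permissioned_signer::destroy_permissioned_handle(handle);
+    }
+}
+```
+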
+ + + +## Function `is_permissioned_signer` + +Returns true if s is a permissioned signer. + + +
public fun is_permissioned_signer(s: &signer): bool
+
+ + + +
+Implementation + + +
public fun is_permissioned_signer(s: &signer): bool {
+    // When the permissioned signer is disabled, no one is able to construct a permissioned
+    // signer. Thus we should return false here, as other on-chain permission checks will
+    // depend on this check.
+    if(!features::is_permissioned_signer_enabled()) {
+        return false;
+    };
+    is_permissioned_signer_impl(s)
+}
+
+ + + +
+ + + +## Function `grant_revoke_permission` + +Grant the permissioned signer the permission to revoke granted permission handles under +its address. + + +
public fun grant_revoke_permission(master: &signer, permissioned: &signer)
+
+ + + +
+Implementation + + +
public fun grant_revoke_permission(
+    master: &signer,
+    permissioned: &signer,
+) acquires PermissionStorage {
+    assert!(
+        features::is_permissioned_signer_enabled(),
+        error::permission_denied(EPERMISSION_SIGNER_DISABLED)
+    );
+    authorize_unlimited(master, permissioned, RevokePermissionHandlePermission {});
+}
+
+ + + +
+ + + +## Function `revoke_permission_storage_address` + +Revoke a specific storable permission handle immediately. This prevents the owner of +the storable permission handle from deriving a signer from it anymore. + +
public entry fun revoke_permission_storage_address(s: &signer, permissions_storage_addr: address)
+
+ + + +
+Implementation + + +
public entry fun revoke_permission_storage_address(
+    s: &signer, permissions_storage_addr: address
+) acquires GrantedPermissionHandles, PermissionStorage {
+    assert!(
+        features::is_permissioned_signer_enabled(),
+        error::permission_denied(EPERMISSION_SIGNER_DISABLED)
+    );
+    assert!(
+        check_permission_exists(s, RevokePermissionHandlePermission {}),
+        error::permission_denied(ENOT_MASTER_SIGNER)
+    );
+    let master_account_addr = signer::address_of(s);
+
+    assert!(
+        exists<GrantedPermissionHandles>(master_account_addr),
+        error::permission_denied(E_PERMISSION_REVOKED),
+    );
+    let active_handles = &mut GrantedPermissionHandles[master_account_addr].active_handles;
+    let (found, idx) = active_handles.index_of(&permissions_storage_addr);
+
+    // The address has to be in the activated list in the master account address.
+    assert!(found, error::permission_denied(E_NOT_ACTIVE));
+    active_handles.swap_remove(idx);
+    destroy_permissions_storage_address(permissions_storage_addr);
+}
+
+ + + +
+ + + +## Function `revoke_all_handles` + +Revoke all storable permission handles of the signer immediately. + +
public entry fun revoke_all_handles(s: &signer)
+
+ + + +
+Implementation + + +
public entry fun revoke_all_handles(s: &signer) acquires GrantedPermissionHandles, PermissionStorage {
+    assert!(
+        features::is_permissioned_signer_enabled(),
+        error::permission_denied(EPERMISSION_SIGNER_DISABLED)
+    );
+    assert!(
+        check_permission_exists(s, RevokePermissionHandlePermission {}),
+        error::permission_denied(ENOT_MASTER_SIGNER)
+    );
+    let master_account_addr = signer::address_of(s);
+    if (!exists<GrantedPermissionHandles>(master_account_addr)) { return };
+
+    let granted_permissions =
+        borrow_global_mut<GrantedPermissionHandles>(master_account_addr);
+    let delete_list = vector::trim_reverse(
+        &mut granted_permissions.active_handles, 0
+    );
+    vector::destroy(
+        delete_list,
+        |address| {
+            destroy_permissions_storage_address(address);
+        }
+    )
+}
+
+ + + +
+ + + +## Function `initialize_permission_address` + +Initialize permission storage by putting an empty storage under the address. + +
fun initialize_permission_address(permissions_storage_addr: address)
+
+ + + +
+Implementation + + +
inline fun initialize_permission_address(permissions_storage_addr: address) {
+    move_to(
+        &create_signer(permissions_storage_addr),
+        // Each key is ~100bytes, the value is 12 bytes.
+        PermissionStorage::V1 { perms: big_ordered_map::new_with_config(40, 35, false) }
+    );
+}
+
+ + + +
+ + + +## Function `create_storable_permissioned_handle` + +Create a storable permission handle based on the master signer. + +This handle can be used to derive a signer that can be stored by a smart contract. +This is as dangerous as key delegation, thus it remains public(package) for now. + +The caller should check that expiration_time is not too far in the future. + +
public(friend) fun create_storable_permissioned_handle(master: &signer, expiration_time: u64): permissioned_signer::StorablePermissionedHandle
+
+ + + +
+Implementation + + +
public(package) fun create_storable_permissioned_handle(
+    master: &signer, expiration_time: u64
+): StorablePermissionedHandle acquires GrantedPermissionHandles {
+    assert!(
+        features::is_permissioned_signer_enabled(),
+        error::permission_denied(EPERMISSION_SIGNER_DISABLED)
+    );
+
+    assert_master_signer(master);
+    let permissions_storage_addr = generate_auid_address();
+    let master_account_addr = signer::address_of(master);
+
+    assert!(
+        timestamp::now_seconds() < expiration_time,
+        error::permission_denied(E_PERMISSION_EXPIRED)
+    );
+
+    if (!exists<GrantedPermissionHandles>(master_account_addr)) {
+        move_to<GrantedPermissionHandles>(
+            master, GrantedPermissionHandles { active_handles: vector::empty() }
+        );
+    };
+
+    GrantedPermissionHandles[master_account_addr]
+        .active_handles.push_back(permissions_storage_addr);
+
+    initialize_permission_address(permissions_storage_addr);
+
+    StorablePermissionedHandle::V1 {
+        master_account_addr,
+        permissions_storage_addr,
+        expiration_time
+    }
+}
+
+ + + +
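+A hedged sketch of the storable variant. The wrapper resource, module name, and one-hour expiry are assumptions for illustration; since `create_storable_permissioned_handle` is package-visible, a real caller would live inside the framework package, and the sketch assumes `StorablePermissionedHandle` has the `store` ability, as its name suggests.
+
+```move
+module aptos_framework::storable_handle_example {
+    use aptos_framework::permissioned_signer::{Self, StorablePermissionedHandle};
+    use aptos_framework::timestamp;
+
+    /// Illustrative wrapper resource that keeps the handle under the master account.
+    struct SavedHandle has key {
+        handle: StorablePermissionedHandle,
+    }
+
+    /// Create a handle that expires in one hour and store it under the master account.
+    fun save_handle(master: &signer) {
+        let handle = permissioned_signer::create_storable_permissioned_handle(
+            master,
+            timestamp::now_seconds() + 3600 // expiration must be in the future
+        );
+        move_to(master, SavedHandle { handle });
+    }
+
+    /// Later: derive a signer from the stored handle.
+    /// Aborts if the handle has expired or has been revoked.
+    fun use_handle(addr: address): signer acquires SavedHandle {
+        let saved = borrow_global<SavedHandle>(addr);
+        permissioned_signer::signer_from_storable_permissioned_handle(&saved.handle)
+    }
+}
+```
+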
+ + + +## Function `destroy_storable_permissioned_handle` + +Destroys a storable permission handle and cleans up the permissions stored in that handle. + +
public(friend) fun destroy_storable_permissioned_handle(p: permissioned_signer::StorablePermissionedHandle)
+
+ + + +
+Implementation + + +
public(package) fun destroy_storable_permissioned_handle(
+    p: StorablePermissionedHandle
+) acquires PermissionStorage, GrantedPermissionHandles {
+    let StorablePermissionedHandle::V1 {
+        master_account_addr,
+        permissions_storage_addr,
+        expiration_time: _
+    } = p;
+
+    assert!(
+        exists<GrantedPermissionHandles>(master_account_addr),
+        error::permission_denied(E_PERMISSION_REVOKED),
+    );
+    let active_handles = &mut GrantedPermissionHandles[master_account_addr].active_handles;
+
+    let (found, idx) = active_handles.index_of(&permissions_storage_addr);
+
+    // Removing the address from the active handle list if it's still active.
+    if(found) {
+        active_handles.swap_remove(idx);
+    };
+
+    destroy_permissions_storage_address(permissions_storage_addr);
+}
+
+ + + +
+ + + +## Function `destroy_permissions_storage_address` + + + +
fun destroy_permissions_storage_address(permissions_storage_addr: address)
+
+ + + +
+Implementation + + +
inline fun destroy_permissions_storage_address(
+    permissions_storage_addr: address
+) acquires PermissionStorage {
+    if (exists<PermissionStorage>(permissions_storage_addr)) {
+        let PermissionStorage::V1 { perms } =
+            move_from<PermissionStorage>(permissions_storage_addr);
+        big_ordered_map::destroy(
+            perms,
+            |_dv| {},
+        );
+    }
+}
+
+ + + +
+ + + +## Function `signer_from_storable_permissioned_handle` + +Generate the permissioned signer based on the storable permission handle. + + +
public(friend) fun signer_from_storable_permissioned_handle(p: &permissioned_signer::StorablePermissionedHandle): signer
+
+ + + +
+Implementation + + +
public(package) fun signer_from_storable_permissioned_handle(
+    p: &StorablePermissionedHandle
+): signer {
+    assert!(
+        features::is_permissioned_signer_enabled(),
+        error::permission_denied(EPERMISSION_SIGNER_DISABLED)
+    );
+    assert!(
+        timestamp::now_seconds() < p.expiration_time,
+        error::permission_denied(E_PERMISSION_EXPIRED)
+    );
+    assert!(
+        exists<PermissionStorage>(p.permissions_storage_addr),
+        error::permission_denied(E_PERMISSION_REVOKED)
+    );
+    signer_from_permissioned_handle_impl(
+        p.master_account_addr, p.permissions_storage_addr
+    )
+}
+
+ + + +
+ + + +## Function `permissions_storage_address` + +Return the permission handle address so that it can be used for revocation purposes. + +
public(friend) fun permissions_storage_address(p: &permissioned_signer::StorablePermissionedHandle): address
+
+ + + +
+Implementation + + +
public(package) fun permissions_storage_address(
+    p: &StorablePermissionedHandle
+): address {
+    p.permissions_storage_addr
+}
+
+ + + +
+ + + +## Function `assert_master_signer` + +Helper function that aborts if the signer passed in is a permissioned signer. + +
public(friend) fun assert_master_signer(s: &signer)
+
+ + + +
+Implementation + + +
public(package) fun assert_master_signer(s: &signer) {
+    assert!(
+        !is_permissioned_signer(s), error::permission_denied(ENOT_MASTER_SIGNER)
+    );
+}
+
+ + + +
+ + + +## Function `is_above` + +===================================================================================================== +StoredPermission operations + +Check whether a StoredPermission has at least threshold capacity. + +
fun is_above(perm: &permissioned_signer::StoredPermission, threshold: u256): bool
+
+ + + +
+Implementation + + +
fun is_above(perm: &StoredPermission, threshold: u256): bool {
+    match (perm) {
+        StoredPermission::Capacity(capacity) => *capacity >= threshold,
+        StoredPermission::Unlimited => true,
+    }
+}
+
+ + + +
+ + + +## Function `consume_capacity` + +Consume threshold capacity from a StoredPermission. + +
fun consume_capacity(perm: &mut permissioned_signer::StoredPermission, threshold: u256): bool
+
+ + + +
+Implementation + + +
fun consume_capacity(perm: &mut StoredPermission, threshold: u256): bool {
+    match (perm) {
+        StoredPermission::Capacity(current_capacity) => {
+            if (*current_capacity >= threshold) {
+                *current_capacity = *current_capacity - threshold;
+                true
+            } else { false }
+        }
+        StoredPermission::Unlimited => true
+    }
+}
+
+ + + +
+ + + +## Function `increase_capacity` + +Increase the capacity of a StoredPermission by threshold. + +
fun increase_capacity(perm: &mut permissioned_signer::StoredPermission, threshold: u256)
+
+ + + +
+Implementation + + +
fun increase_capacity(perm: &mut StoredPermission, threshold: u256) {
+    match (perm) {
+        StoredPermission::Capacity(current_capacity) => {
+            *current_capacity = *current_capacity + threshold;
+        }
+        StoredPermission::Unlimited => (),
+    }
+}
+
+ + + +
+ + + +## Function `merge` + +Merge the two stored permissions. + +
fun merge(lhs: &mut permissioned_signer::StoredPermission, rhs: permissioned_signer::StoredPermission)
+
+ + + +
+Implementation + + +
fun merge(lhs: &mut StoredPermission, rhs: StoredPermission) {
+    match (rhs) {
+        StoredPermission::Capacity(new_capacity) => {
+            match (lhs) {
+                StoredPermission::Capacity(current_capacity) => {
+                    *current_capacity = *current_capacity + new_capacity;
+                }
+                StoredPermission::Unlimited => (),
+            }
+        }
+        StoredPermission::Unlimited => *lhs = StoredPermission::Unlimited,
+    }
+}
+
+ + + +
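+In other words, capacities add up, and Unlimited absorbs anything it is merged with. A minimal illustration as a hypothetical in-module unit test (not part of the source), reusing the module's own `is_above` and `U256_MAX`:
+
+```move
+#[test]
+fun merge_semantics() {
+    // Capacity + Capacity: the capacities add up.
+    let lhs = StoredPermission::Capacity(10);
+    merge(&mut lhs, StoredPermission::Capacity(5));
+    assert!(is_above(&lhs, 15), 0);
+    assert!(!is_above(&lhs, 16), 1);
+
+    // Merging with Unlimited turns the left-hand side into Unlimited.
+    merge(&mut lhs, StoredPermission::Unlimited);
+    assert!(is_above(&lhs, U256_MAX), 2);
+}
+```
+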
+ + + +## Function `map_or` + +===================================================================================================== +Permission Management + +Authorizes permissioned with the given permission. This requires access to the master +signer. + +
fun map_or<PermKey: copy, drop, store, T>(permissioned: &signer, perm: PermKey, mutate: |&mut permissioned_signer::StoredPermission|T, default: T): T
+
+ + + +
+Implementation + + +
inline fun map_or<PermKey: copy + drop + store, T>(
+    permissioned: &signer,
+    perm: PermKey,
+    mutate: |&mut StoredPermission| T,
+    default: T,
+): T {
+    let permission_signer_addr = permission_address(permissioned);
+    assert!(
+        exists<PermissionStorage>(permission_signer_addr),
+        error::permission_denied(E_NOT_ACTIVE)
+    );
+    let perms =
+        &mut borrow_global_mut<PermissionStorage>(permission_signer_addr).perms;
+    let key = copyable_any::pack(perm);
+    if (big_ordered_map::contains(perms, &key)) {
+        let value = perms.remove(&key);
+        let return_ = mutate(&mut value);
+        perms.add(key, value);
+        return_
+    } else {
+        default
+    }
+}
+
+ + + +
+ + + +## Function `insert_or` + + + +
fun insert_or<PermKey: copy, drop, store>(permissioned: &signer, perm: PermKey, mutate: |&mut permissioned_signer::StoredPermission|, default: permissioned_signer::StoredPermission)
+
+ + + +
+Implementation + + +
inline fun insert_or<PermKey: copy + drop + store>(
+    permissioned: &signer,
+    perm: PermKey,
+    mutate: |&mut StoredPermission|,
+    default: StoredPermission,
+) {
+    let permission_signer_addr = permission_address(permissioned);
+    assert!(
+        exists<PermissionStorage>(permission_signer_addr),
+        error::permission_denied(E_NOT_ACTIVE)
+    );
+    let perms =
+        &mut borrow_global_mut<PermissionStorage>(permission_signer_addr).perms;
+    let key = copyable_any::pack(perm);
+    if (perms.contains(&key)) {
+        let value = perms.remove(&key);
+        mutate(&mut value);
+        perms.add(key, value);
+    } else {
+        perms.add(key, default);
+    }
+}
+
+ + + +
+ + + +## Function `authorize_increase` + +Authorizes permissioned with a given capacity and increments the existing capacity if present. + +Consumption using check_permission_consume will deduct from the capacity. + +
public(friend) fun authorize_increase<PermKey: copy, drop, store>(master: &signer, permissioned: &signer, capacity: u256, perm: PermKey)
+
+ + + +
+Implementation + + +
public(package) fun authorize_increase<PermKey: copy + drop + store>(
+    master: &signer,
+    permissioned: &signer,
+    capacity: u256,
+    perm: PermKey
+) acquires PermissionStorage {
+    assert!(
+        is_permissioned_signer(permissioned)
+            && !is_permissioned_signer(master)
+            && signer::address_of(master) == signer::address_of(permissioned),
+        error::permission_denied(ECANNOT_AUTHORIZE)
+    );
+    insert_or(
+        permissioned,
+        perm,
+        |stored_permission| {
+            increase_capacity(stored_permission, capacity);
+        },
+        StoredPermission::Capacity(capacity),
+    )
+}
+
+ + + +
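+The intended pattern for client modules: define a private witness type as the PermKey, have the master signer authorize a capacity, and consume from it whenever the permissioned signer acts. A hedged sketch follows (module name, witness type, and error code are illustrative; because these functions are package-visible, a real caller would be another framework module):
+
+```move
+module aptos_framework::withdraw_permission_example {
+    use std::error;
+    use std::signer;
+    use aptos_framework::permissioned_signer;
+
+    /// Witness key: it can only be constructed inside this module, so only this
+    /// module can grant, check, or consume this particular permission.
+    struct WithdrawPermission has copy, drop, store {
+        account: address,
+    }
+
+    const EWITHDRAW_NOT_PERMITTED: u64 = 1;
+
+    /// The master signer grants the permissioned signer a withdrawal budget.
+    public fun grant(master: &signer, permissioned: &signer, budget: u256) {
+        permissioned_signer::authorize_increase(
+            master,
+            permissioned,
+            budget,
+            WithdrawPermission { account: signer::address_of(master) }
+        );
+    }
+
+    /// Consume `amount` from the budget; aborts if the remaining capacity is too small.
+    public fun withdraw(permissioned: &signer, amount: u256) {
+        assert!(
+            permissioned_signer::check_permission_consume(
+                permissioned,
+                amount,
+                WithdrawPermission { account: signer::address_of(permissioned) }
+            ),
+            error::permission_denied(EWITHDRAW_NOT_PERMITTED)
+        );
+        // ... perform the actual withdrawal here ...
+    }
+}
+```
+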
+ + + +## Function `authorize_unlimited` + +Authorizes permissioned with the given unlimited permission. +Unlimited permission can be consumed however many times. + + +
public(friend) fun authorize_unlimited<PermKey: copy, drop, store>(master: &signer, permissioned: &signer, perm: PermKey)
+
+ + + +
+Implementation + + +
public(package) fun authorize_unlimited<PermKey: copy + drop + store>(
+    master: &signer,
+    permissioned: &signer,
+    perm: PermKey
+) acquires PermissionStorage {
+    assert!(
+        is_permissioned_signer(permissioned)
+            && !is_permissioned_signer(master)
+            && signer::address_of(master) == signer::address_of(permissioned),
+        error::permission_denied(ECANNOT_AUTHORIZE)
+    );
+    insert_or(
+        permissioned,
+        perm,
+        |stored_permission| {
+            *stored_permission = StoredPermission::Unlimited;
+        },
+        StoredPermission::Unlimited,
+    )
+}
+
+ + + +
+ + + +## Function `grant_unlimited_with_permissioned_signer` + +Grant an unlimited permission to a permissioned signer **without** the master signer's approval. + +
public(friend) fun grant_unlimited_with_permissioned_signer<PermKey: copy, drop, store>(permissioned: &signer, perm: PermKey)
+
+ + + +
+Implementation + + +
public(package) fun grant_unlimited_with_permissioned_signer<PermKey: copy + drop + store>(
+    permissioned: &signer,
+    perm: PermKey
+) acquires PermissionStorage {
+    if(!is_permissioned_signer(permissioned)) {
+        return;
+    };
+    insert_or(
+        permissioned,
+        perm,
+        |stored_permission| {
+            *stored_permission = StoredPermission::Unlimited;
+        },
+        StoredPermission::Unlimited,
+    )
+}
+
+ + + +
+ + + +## Function `increase_limit` + +Increase the capacity of a permissioned signer **without** the master signer's approval. + +The caller of the module needs to make sure the witness type PermKey can only be +constructed within its own module, otherwise attackers could refill the permission themselves +to bypass the checks. + +
public(friend) fun increase_limit<PermKey: copy, drop, store>(permissioned: &signer, capacity: u256, perm: PermKey)
+
+ + + +
+Implementation + + +
public(package) fun increase_limit<PermKey: copy + drop + store>(
+    permissioned: &signer,
+    capacity: u256,
+    perm: PermKey
+) acquires PermissionStorage {
+    if(!is_permissioned_signer(permissioned)) {
+        return;
+    };
+    insert_or(
+        permissioned,
+        perm,
+        |stored_permission| {
+            increase_capacity(stored_permission, capacity);
+        },
+        StoredPermission::Capacity(capacity),
+    )
+}
+
+ + + +
+ + + +## Function `check_permission_exists` + + + +
public(friend) fun check_permission_exists<PermKey: copy, drop, store>(s: &signer, perm: PermKey): bool
+
+ + + +
+Implementation + + +
public(package) fun check_permission_exists<PermKey: copy + drop + store>(
+    s: &signer, perm: PermKey
+): bool acquires PermissionStorage {
+    // 0-capacity permissions will be treated as non-existent.
+    check_permission_capacity_above(s, 1, perm)
+}
+
+ + + +
+ + + +## Function `check_permission_capacity_above` + + + +
public(friend) fun check_permission_capacity_above<PermKey: copy, drop, store>(s: &signer, threshold: u256, perm: PermKey): bool
+
+ + + +
+Implementation + + +
public(package) fun check_permission_capacity_above<PermKey: copy + drop + store>(
+    s: &signer, threshold: u256, perm: PermKey
+): bool acquires PermissionStorage {
+    if (!is_permissioned_signer(s)) {
+        // master signer has all permissions
+        return true
+    };
+    map_or(
+        s,
+        perm,
+        |stored_permission| {
+            is_above(stored_permission, threshold)
+        },
+        false,
+    )
+}
+
+ + + +
+ + + +## Function `check_permission_consume` + + + +
public(friend) fun check_permission_consume<PermKey: copy, drop, store>(s: &signer, threshold: u256, perm: PermKey): bool
+
+ + + +
+Implementation + + +
public(package) fun check_permission_consume<PermKey: copy + drop + store>(
+    s: &signer, threshold: u256, perm: PermKey
+): bool acquires PermissionStorage {
+    if (!is_permissioned_signer(s)) {
+        // master signer has all permissions
+        return true
+    };
+    map_or(
+        s,
+        perm,
+        |stored_permission| {
+             consume_capacity(stored_permission, threshold)
+        },
+        false,
+    )
+}
+
+ + + +
+ + + +## Function `capacity` + + + +
public(friend) fun capacity<PermKey: copy, drop, store>(s: &signer, perm: PermKey): option::Option<u256>
+
+ + + +
+Implementation + + +
public(package) fun capacity<PermKey: copy + drop + store>(
+    s: &signer, perm: PermKey
+): Option<u256> acquires PermissionStorage {
+    if (!is_permissioned_signer(s)) {
+        return option::some(U256_MAX)
+    };
+    map_or(
+        s,
+        perm,
+        |stored_permission: &mut StoredPermission| {
+            option::some(match (stored_permission) {
+                StoredPermission::Capacity(capacity) => *capacity,
+                StoredPermission::Unlimited => U256_MAX,
+            })
+        },
+        option::none(),
+    )
+}
+
+ + + +
+ + + +## Function `revoke_permission` + + + +
public(friend) fun revoke_permission<PermKey: copy, drop, store>(permissioned: &signer, perm: PermKey)
+
+ + + +
+Implementation + + +
public(package) fun revoke_permission<PermKey: copy + drop + store>(
+    permissioned: &signer, perm: PermKey
+) acquires PermissionStorage {
+    if (!is_permissioned_signer(permissioned)) {
+        // Master signer has no permissions associated with it.
+        return
+    };
+    let addr = permission_address(permissioned);
+    if (!exists<PermissionStorage>(addr)) { return };
+    let perm_storage = &mut PermissionStorage[addr].perms;
+    let key = copyable_any::pack(perm);
+    if (perm_storage.contains(&key)) {
+        perm_storage.remove(&key);
+    }
+}
+
+ + + +
+ + + +## Function `address_of` + +Unused function. Kept for compatibility purposes. + +
public fun address_of(_s: &signer): address
+
+ + + +
+Implementation + + +
public fun address_of(_s: &signer): address {
+    abort error::permission_denied(EPERMISSION_SIGNER_DISABLED)
+}
+
+ + + +
+ + + +## Function `borrow_address` + +Unused function. Kept for compatibility purposes. + +
public fun borrow_address(_s: &signer): &address
+
+ + + +
+Implementation + + +
public fun borrow_address(_s: &signer): &address {
+    abort error::permission_denied(EPERMISSION_SIGNER_DISABLED)
+}
+
+ + + +
+ + + +## Function `is_permissioned_signer_impl` + + +Check whether this is a permissioned signer. + + +
fun is_permissioned_signer_impl(s: &signer): bool
+
+ + + +
+Implementation + + +
native fun is_permissioned_signer_impl(s: &signer): bool;
+
+ + + +
+ + + +## Function `permission_address` + +Return the address used for storing permissions. Aborts if not a permissioned signer. + + +
fun permission_address(permissioned: &signer): address
+
+ + + +
+Implementation + + +
native fun permission_address(permissioned: &signer): address;
+
+ + + +
+ + + +## Function `signer_from_permissioned_handle_impl` + +Creates a permissioned signer from an existing universal signer. The function aborts if the +given signer is already a permissioned signer. + +The implementation of this function requires extending the value representation for signers in the VM. +Invariants: +signer::address_of(master) == signer::address_of(signer_from_permissioned_handle(create_permissioned_handle(master))), + +
fun signer_from_permissioned_handle_impl(master_account_addr: address, permissions_storage_addr: address): signer
+
+ + + +
+Implementation + + +
native fun signer_from_permissioned_handle_impl(
+    master_account_addr: address, permissions_storage_addr: address
+): signer;
+
+ + + +
+ + + +## Specification + + + +
pragma verify = true;
+axiom forall a: GrantedPermissionHandles:
+    (
+        forall i in 0..len(a.active_handles):
+            forall j in 0..len(a.active_handles):
+                i != j ==>
+                    a.active_handles[i] != a.active_handles[j]
+    );
+
+ + + + + + + +
fun spec_is_permissioned_signer_impl(s: signer): bool;
+
+ + + + + +### Function `create_permissioned_handle` + + +
public fun create_permissioned_handle(master: &signer): permissioned_signer::PermissionedHandle
+
+ + + + +
pragma opaque;
+aborts_if [abstract] spec_is_permissioned_signer(master);
+let permissions_storage_addr = transaction_context::spec_generate_unique_address();
+modifies global<PermissionStorage>(permissions_storage_addr);
+let master_account_addr = signer::address_of(master);
+ensures result.master_account_addr == master_account_addr;
+ensures result.permissions_storage_addr == permissions_storage_addr;
+
+ + + + + +### Function `destroy_permissioned_handle` + + +
public fun destroy_permissioned_handle(p: permissioned_signer::PermissionedHandle)
+
+ + + + +
ensures !exists<PermissionStorage>(p.permissions_storage_addr);
+
+ + + + + +### Function `is_permissioned_signer` + + +
public fun is_permissioned_signer(s: &signer): bool
+
+ + + + +
pragma opaque;
+aborts_if [abstract] false;
+ensures [abstract] result == spec_is_permissioned_signer(s);
+
+ + + + + + + +
fun spec_permission_address(s: signer): address;
+
+ + + + + +### Function `revoke_permission_storage_address` + + +
public entry fun revoke_permission_storage_address(s: &signer, permissions_storage_addr: address)
+
+ + + + + + +### Function `create_storable_permissioned_handle` + + +
public(friend) fun create_storable_permissioned_handle(master: &signer, expiration_time: u64): permissioned_signer::StorablePermissionedHandle
+
+ + + + +
pragma opaque;
+aborts_if [abstract] spec_is_permissioned_signer(master);
+let permissions_storage_addr = transaction_context::spec_generate_unique_address();
+modifies global<PermissionStorage>(permissions_storage_addr);
+let master_account_addr = signer::address_of(master);
+modifies global<GrantedPermissionHandles>(master_account_addr);
+ensures result.master_account_addr == master_account_addr;
+ensures result.permissions_storage_addr == permissions_storage_addr;
+ensures result.expiration_time == expiration_time;
+ensures vector::spec_contains(
+    global<GrantedPermissionHandles>(master_account_addr).active_handles,
+    permissions_storage_addr
+);
+ensures exists<GrantedPermissionHandles>(master_account_addr);
+
+ + + + + +### Function `destroy_storable_permissioned_handle` + + +
public(friend) fun destroy_storable_permissioned_handle(p: permissioned_signer::StorablePermissionedHandle)
+
+ + + + +
ensures !exists<PermissionStorage>(p.permissions_storage_addr);
+let post granted_permissions = global<GrantedPermissionHandles>(
+    p.master_account_addr
+);
+
+ + + + + +### Function `authorize_increase` + + +
public(friend) fun authorize_increase<PermKey: copy, drop, store>(master: &signer, permissioned: &signer, capacity: u256, perm: PermKey)
+
+ + + + +
pragma aborts_if_is_partial;
+aborts_if !spec_is_permissioned_signer(permissioned);
+aborts_if spec_is_permissioned_signer(master);
+aborts_if signer::address_of(permissioned) != signer::address_of(master);
+ensures exists<PermissionStorage>(
+    spec_permission_address(permissioned)
+);
+
+ + + + + +### Function `check_permission_exists` + + +
public(friend) fun check_permission_exists<PermKey: copy, drop, store>(s: &signer, perm: PermKey): bool
+
+ + + + +
pragma opaque;
+modifies global<PermissionStorage>(spec_permission_address(s));
+ensures [abstract] result == spec_check_permission_exists(s, perm);
+
+ + + + + + + +
fun spec_check_permission_exists<PermKey: copy + drop + store>(s: signer, perm: PermKey): bool;
+
+ + + + + +### Function `check_permission_capacity_above` + + +
public(friend) fun check_permission_capacity_above<PermKey: copy, drop, store>(s: &signer, threshold: u256, perm: PermKey): bool
+
+ + + + +
modifies global<PermissionStorage>(spec_permission_address(s));
+let permissioned_signer_addr = spec_permission_address(s);
+ensures !spec_is_permissioned_signer(s) ==> result == true;
+ensures (
+    spec_is_permissioned_signer(s)
+        && !exists<PermissionStorage>(permissioned_signer_addr)
+) ==> result == false;
+
+ + + + + +### Function `check_permission_consume` + + +
public(friend) fun check_permission_consume<PermKey: copy, drop, store>(s: &signer, threshold: u256, perm: PermKey): bool
+
+ + + + +
pragma opaque;
+let permissioned_signer_addr = spec_permission_address(s);
+modifies global<PermissionStorage>(spec_permission_address(s));
+ensures [abstract] result == spec_check_permission_consume(s, threshold, perm);
+
+ + + + + + + +
fun spec_check_permission_consume<PermKey: copy + drop + store>(s: signer, threshold: u256, perm: PermKey): bool;
+
+ + + + + +### Function `capacity` + + +
public(friend) fun capacity<PermKey: copy, drop, store>(s: &signer, perm: PermKey): option::Option<u256>
+
+ + + + +
pragma opaque;
+let permissioned_signer_addr = spec_permission_address(s);
+modifies global<PermissionStorage>(spec_permission_address(s));
+ensures [abstract] result == spec_capacity(s, perm);
+
+ + + + + + + +
fun spec_capacity<PermKey: copy + drop + store>(s: signer, perm: PermKey): Option<u256>;
+
+ + + + + +### Function `is_permissioned_signer_impl` + + +
fun is_permissioned_signer_impl(s: &signer): bool
+
+ + + + +
pragma opaque;
+ensures [abstract] result == spec_is_permissioned_signer_impl(s);
+
+ + + + + + + +
fun spec_is_permissioned_signer(s: signer): bool {
+   use std::features;
+   use std::features::PERMISSIONED_SIGNER;
+   if (!features::spec_is_enabled(PERMISSIONED_SIGNER)) {
+       false
+   } else {
+       spec_is_permissioned_signer_impl(s)
+   }
+}
+
+ + + + + +### Function `permission_address` + + +
fun permission_address(permissioned: &signer): address
+
+ + + + +
pragma opaque;
+aborts_if [abstract]!spec_is_permissioned_signer(permissioned);
+ensures [abstract] result == spec_permission_address(permissioned);
+
+ + + + + + + +
fun spec_signer_from_permissioned_handle_impl(
+   master_account_addr: address, permissions_storage_addr: address
+): signer;
+
+ + + + + +### Function `signer_from_permissioned_handle_impl` + + +
fun signer_from_permissioned_handle_impl(master_account_addr: address, permissions_storage_addr: address): signer
+
+ + + + +
pragma opaque;
+ensures [abstract] result
+    == spec_signer_from_permissioned_handle_impl(
+        master_account_addr, permissions_storage_addr
+    );
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/aptos-framework/doc/primary_fungible_store.md b/aptos-move/framework/aptos-framework/doc/primary_fungible_store.md index 3427f7836b8a9..c373dd8473f6f 100644 --- a/aptos-move/framework/aptos-framework/doc/primary_fungible_store.md +++ b/aptos-move/framework/aptos-framework/doc/primary_fungible_store.md @@ -28,12 +28,14 @@ fungible asset to it. This emits an deposit event. - [Function `primary_store_address_inlined`](#0x1_primary_fungible_store_primary_store_address_inlined) - [Function `primary_store_inlined`](#0x1_primary_fungible_store_primary_store_inlined) - [Function `primary_store_exists_inlined`](#0x1_primary_fungible_store_primary_store_exists_inlined) +- [Function `grant_permission`](#0x1_primary_fungible_store_grant_permission) +- [Function `grant_apt_permission`](#0x1_primary_fungible_store_grant_apt_permission) - [Function `balance`](#0x1_primary_fungible_store_balance) - [Function `is_balance_at_least`](#0x1_primary_fungible_store_is_balance_at_least) - [Function `is_frozen`](#0x1_primary_fungible_store_is_frozen) - [Function `withdraw`](#0x1_primary_fungible_store_withdraw) - [Function `deposit`](#0x1_primary_fungible_store_deposit) -- [Function `force_deposit`](#0x1_primary_fungible_store_force_deposit) +- [Function `deposit_with_signer`](#0x1_primary_fungible_store_deposit_with_signer) - [Function `transfer`](#0x1_primary_fungible_store_transfer) - [Function `transfer_assert_minimum_deposit`](#0x1_primary_fungible_store_transfer_assert_minimum_deposit) - [Function `mint`](#0x1_primary_fungible_store_mint) @@ -48,8 +50,7 @@ fungible asset to it. This emits an deposit event. - [Module-level Specification](#module-level-spec) -
use 0x1::account;
-use 0x1::dispatchable_fungible_asset;
+
use 0x1::dispatchable_fungible_asset;
 use 0x1::fungible_asset;
 use 0x1::object;
 use 0x1::option;
@@ -364,6 +365,73 @@ Use instead of the corresponding view functions for dispatchable hooks to avoid
 
 
 
+
+ + + +## Function `grant_permission` + + + +
public fun grant_permission<T: key>(master: &signer, permissioned: &signer, metadata: object::Object<T>, amount: u64)
+
+ + + +
+Implementation + + +
public fun grant_permission<T: key>(
+    master: &signer,
+    permissioned: &signer,
+    metadata: Object<T>,
+    amount: u64
+) {
+    fungible_asset::grant_permission_by_address(
+        master,
+        permissioned,
+        primary_store_address_inlined(signer::address_of(permissioned), metadata),
+        amount
+    );
+}
+
+ + + +
+ + + +## Function `grant_apt_permission` + + + +
public fun grant_apt_permission(master: &signer, permissioned: &signer, amount: u64)
+
+ + + +
+Implementation + + +
public fun grant_apt_permission(
+    master: &signer,
+    permissioned: &signer,
+    amount: u64
+) {
+    fungible_asset::grant_permission_by_address(
+        master,
+        permissioned,
+        object::create_user_derived_object_address(signer::address_of(permissioned), @aptos_fungible_asset),
+        amount
+    );
+}
+
+ + +
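+A minimal sketch of how these grants combine with a permissioned signer. The script, the recipient, and the 1 APT (100_000_000 octas) amount are illustrative, and `aptos_account::transfer` stands in for any downstream call that withdraws from the master's primary store:
+
+```move
+script {
+    use aptos_framework::aptos_account;
+    use aptos_framework::permissioned_signer;
+    use aptos_framework::primary_fungible_store;
+
+    fun grant_and_spend(master: &signer, recipient: address) {
+        let handle = permissioned_signer::create_permissioned_handle(master);
+        let perm_signer = permissioned_signer::signer_from_permissioned_handle(&handle);
+
+        // Allow the permissioned signer to move up to 1 APT out of the master's
+        // primary fungible store.
+        primary_fungible_store::grant_apt_permission(master, &perm_signer, 100_000_000);
+
+        // Spending through the permissioned signer now succeeds up to the granted amount.
+        aptos_account::transfer(&perm_signer, recipient, 100_000_000);
+
+        permissioned_signer::destroy_permissioned_handle(handle);
+    }
+}
+```
+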
@@ -385,7 +453,7 @@ Get the balance of account's p
public fun balance<T: key>(account: address, metadata: Object<T>): u64 {
     if (primary_store_exists(account, metadata)) {
-        fungible_asset::balance(primary_store(account, metadata))
+        dispatchable_fungible_asset::derived_balance(primary_store(account, metadata))
     } else {
         0
     }
@@ -414,7 +482,7 @@ Get the balance of account's p
 
 
public fun is_balance_at_least<T: key>(account: address, metadata: Object<T>, amount: u64): bool {
     if (primary_store_exists(account, metadata)) {
-        fungible_asset::is_balance_at_least(primary_store(account, metadata), amount)
+        dispatchable_fungible_asset::is_derived_balance_at_least(primary_store(account, metadata), amount)
     } else {
         amount == 0
     }
@@ -510,14 +578,17 @@ Deposit fungible asset fa to the given account's primary store.
 
 
 
-
+
 
-## Function `force_deposit`
+## Function `deposit_with_signer`
 
-Deposit fungible asset fa to the given account's primary store.
+Deposit fungible asset fa to the given account's primary store using a signer.
 
+If owner is a permissioned signer, the signer will be granted permission to withdraw
+the same amount of funds in the future.
 
-
public(friend) fun force_deposit(owner: address, fa: fungible_asset::FungibleAsset)
+
+
public fun deposit_with_signer(owner: &signer, fa: fungible_asset::FungibleAsset)
 
@@ -526,10 +597,18 @@ Deposit fungible asset fa to the given account's primary store. Implementation -
public(friend) fun force_deposit(owner: address, fa: FungibleAsset) acquires DeriveRefPod {
+
public fun deposit_with_signer(owner: &signer, fa: FungibleAsset) acquires DeriveRefPod {
+    fungible_asset::refill_permission(
+        owner,
+        fungible_asset::amount(&fa),
+        primary_store_address_inlined(
+            signer::address_of(owner),
+            fungible_asset::metadata_from_asset(&fa),
+        )
+    );
     let metadata = fungible_asset::asset_metadata(&fa);
-    let store = ensure_primary_store_exists(owner, metadata);
-    fungible_asset::deposit_internal(object::object_address(&store), fa);
+    let store = ensure_primary_store_exists(signer::address_of(owner), metadata);
+    dispatchable_fungible_asset::deposit(store, fa);
 }
 
@@ -559,8 +638,6 @@ Transfer amount of fungible asset from sender's primary store to re recipient: address, amount: u64, ) acquires DeriveRefPod { - // Create account if it does not yet exist, otherwise funds may get stuck in new accounts. - account::create_account_if_does_not_exist(recipient); let sender_store = ensure_primary_store_exists(signer::address_of(sender), metadata); // Check if the sender store object has been burnt or not. If so, unburn it first. may_be_unburn(sender, sender_store); @@ -723,7 +800,7 @@ Withdraw from the primary store of owner ignoring frozen flag. ## Function `deposit_with_ref` -Deposit from the primary store of owner ignoring frozen flag. +Deposit to the primary store of owner ignoring frozen flag.
public fun deposit_with_ref(transfer_ref: &fungible_asset::TransferRef, owner: address, fa: fungible_asset::FungibleAsset)
@@ -736,11 +813,11 @@ Deposit from the primary store of owner ignoring frozen flag.
 
 
 
public fun deposit_with_ref(transfer_ref: &TransferRef, owner: address, fa: FungibleAsset) acquires DeriveRefPod {
-    let from_primary_store = ensure_primary_store_exists(
+    let to_primary_store = ensure_primary_store_exists(
         owner,
         fungible_asset::transfer_ref_metadata(transfer_ref)
     );
-    fungible_asset::deposit_with_ref(transfer_ref, from_primary_store, fa);
+    fungible_asset::deposit_with_ref(transfer_ref, to_primary_store, fa);
 }
 
diff --git a/aptos-move/framework/aptos-framework/doc/randomness.md b/aptos-move/framework/aptos-framework/doc/randomness.md index bb90390ae5d3f..f822c004caaa7 100644 --- a/aptos-move/framework/aptos-framework/doc/randomness.md +++ b/aptos-move/framework/aptos-framework/doc/randomness.md @@ -36,7 +36,7 @@ Security holds under the same proof-of-stake assumption that secures the Aptos n - [Function `u256_range`](#0x1_randomness_u256_range) - [Function `permutation`](#0x1_randomness_permutation) - [Function `safe_add_mod`](#0x1_randomness_safe_add_mod) -- [Function `safe_add_mod_for_verification`](#0x1_randomness_safe_add_mod_for_verification) +- [Function `take_first`](#0x1_randomness_take_first) - [Function `fetch_and_increment_txn_counter`](#0x1_randomness_fetch_and_increment_txn_counter) - [Function `is_unbiasable`](#0x1_randomness_is_unbiasable) - [Specification](#@Specification_1) @@ -54,7 +54,6 @@ Security holds under the same proof-of-stake assumption that secures the Aptos n - [Function `u64_range`](#@Specification_1_u64_range) - [Function `u256_range`](#@Specification_1_u256_range) - [Function `permutation`](#@Specification_1_permutation) - - [Function `safe_add_mod_for_verification`](#@Specification_1_safe_add_mod_for_verification) - [Function `fetch_and_increment_txn_counter`](#@Specification_1_fetch_and_increment_txn_counter) - [Function `is_unbiasable`](#@Specification_1_is_unbiasable) @@ -320,7 +319,7 @@ Generates a sequence of bytes uniformly at random let c = 0; while (c < n) { let blob = next_32_bytes(); - vector::append(&mut v, blob); + vector::reverse_append(&mut v, blob); c = c + 32; }; @@ -827,6 +826,8 @@ If n is 0, returns the empty vector.
public fun permutation(n: u64): vector<u64> acquires PerBlockRandomness {
+    event::emit(RandomnessGeneratedEvent {});
+
     let values = vector[];
 
     if(n == 0) {
@@ -865,8 +866,6 @@ If n is 0, returns the empty vector.
         tail = tail - 1;
     };
 
-    event::emit(RandomnessGeneratedEvent {});
-
     values
 }
 
@@ -891,13 +890,11 @@ Compute (a + b) % m, assuming m >= 1, 0 <= a < m, 0& Implementation -
inline fun safe_add_mod(a: u256, b: u256, m: u256): u256 {
+
fun safe_add_mod(a: u256, b: u256, m: u256): u256 {
+    let a_clone = a;
     let neg_b = m - b;
-    if (a < neg_b) {
-        a + b
-    } else {
-        a - neg_b
-    }
+    let a_less = a < neg_b;
+    take_first(if (a_less) { a + b } else { a_clone - neg_b }, if (!a_less) { a_clone - neg_b } else { a + b })
 }
 
@@ -905,14 +902,13 @@ Compute (a + b) % m, assuming m >= 1, 0 <= a < m, 0& - + -## Function `safe_add_mod_for_verification` +## Function `take_first` -
#[verify_only]
-fun safe_add_mod_for_verification(a: u256, b: u256, m: u256): u256
+
fun take_first(x: u256, _y: u256): u256
 
@@ -921,14 +917,7 @@ Compute (a + b) % m, assuming m >= 1, 0 <= a < m, 0& Implementation -
fun safe_add_mod_for_verification(a: u256, b: u256, m: u256): u256 {
-    let neg_b = m - b;
-    if (a < neg_b) {
-        a + b
-    } else {
-        a - neg_b
-    }
-}
+
fun take_first(x: u256, _y: u256 ): u256 { x }
 
@@ -1276,25 +1265,6 @@ function as its payload. - - -### Function `safe_add_mod_for_verification` - - -
#[verify_only]
-fun safe_add_mod_for_verification(a: u256, b: u256, m: u256): u256
-
- - - - -
aborts_if m < b;
-aborts_if a < m - b && a + b > MAX_U256;
-ensures result == spec_safe_add_mod(a, b, m);
-
- - - diff --git a/aptos-move/framework/aptos-framework/doc/randomness_api_v0_config.md b/aptos-move/framework/aptos-framework/doc/randomness_api_v0_config.md index b89ad59c0e371..da6e12e520409 100644 --- a/aptos-move/framework/aptos-framework/doc/randomness_api_v0_config.md +++ b/aptos-move/framework/aptos-framework/doc/randomness_api_v0_config.md @@ -176,7 +176,7 @@ Only used in reconfigurations to apply the pending on_new_epoch(framework: &signer) acquires RequiredGasDeposit, AllowCustomMaxGasFlag { system_addresses::assert_aptos_framework(framework); if (config_buffer::does_exist<RequiredGasDeposit>()) { - let new_config = config_buffer::extract<RequiredGasDeposit>(); + let new_config = config_buffer::extract_v2<RequiredGasDeposit>(); if (exists<RequiredGasDeposit>(@aptos_framework)) { *borrow_global_mut<RequiredGasDeposit>(@aptos_framework) = new_config; } else { @@ -184,7 +184,7 @@ Only used in reconfigurations to apply the pending config_buffer::does_exist<AllowCustomMaxGasFlag>()) { - let new_config = config_buffer::extract<AllowCustomMaxGasFlag>(); + let new_config = config_buffer::extract_v2<AllowCustomMaxGasFlag>(); if (exists<AllowCustomMaxGasFlag>(@aptos_framework)) { *borrow_global_mut<AllowCustomMaxGasFlag>(@aptos_framework) = new_config; } else { diff --git a/aptos-move/framework/aptos-framework/doc/randomness_config.md b/aptos-move/framework/aptos-framework/doc/randomness_config.md index b658b03cdd6c0..ba793f4bc058f 100644 --- a/aptos-move/framework/aptos-framework/doc/randomness_config.md +++ b/aptos-move/framework/aptos-framework/doc/randomness_config.md @@ -253,7 +253,7 @@ Only used in reconfigurations to apply the pending on_new_epoch(framework: &signer) acquires RandomnessConfig { system_addresses::assert_aptos_framework(framework); if (config_buffer::does_exist<RandomnessConfig>()) { - let new_config = config_buffer::extract<RandomnessConfig>(); + let new_config = config_buffer::extract_v2<RandomnessConfig>(); if (exists<RandomnessConfig>(@aptos_framework)) { *borrow_global_mut<RandomnessConfig>(@aptos_framework) = new_config; } else { diff --git a/aptos-move/framework/aptos-framework/doc/randomness_config_seqnum.md b/aptos-move/framework/aptos-framework/doc/randomness_config_seqnum.md index e9efed50c38e8..d45adf72a9350 100644 --- a/aptos-move/framework/aptos-framework/doc/randomness_config_seqnum.md +++ b/aptos-move/framework/aptos-framework/doc/randomness_config_seqnum.md @@ -131,7 +131,7 @@ Only used in reconfigurations to apply the pending RandomnessConfig
public(friend) fun on_new_epoch(framework: &signer) acquires RandomnessConfigSeqNum {
     system_addresses::assert_aptos_framework(framework);
     if (config_buffer::does_exist<RandomnessConfigSeqNum>()) {
-        let new_config = config_buffer::extract<RandomnessConfigSeqNum>();
+        let new_config = config_buffer::extract_v2<RandomnessConfigSeqNum>();
         if (exists<RandomnessConfigSeqNum>(@aptos_framework)) {
             *borrow_global_mut<RandomnessConfigSeqNum>(@aptos_framework) = new_config;
         } else {
diff --git a/aptos-move/framework/aptos-framework/doc/rate_limiter.md b/aptos-move/framework/aptos-framework/doc/rate_limiter.md
new file mode 100644
index 0000000000000..a52929b1efc8a
--- /dev/null
+++ b/aptos-move/framework/aptos-framework/doc/rate_limiter.md
@@ -0,0 +1,180 @@
+
+
+
+# Module `0x1::rate_limiter`
+
+
+
+-  [Enum Resource `RateLimiter`](#0x1_rate_limiter_RateLimiter)
+-  [Function `initialize`](#0x1_rate_limiter_initialize)
+-  [Function `request`](#0x1_rate_limiter_request)
+-  [Function `refill`](#0x1_rate_limiter_refill)
+
+
+
use 0x1::timestamp;
+
+ + + + + +## Enum Resource `RateLimiter` + + + +
enum RateLimiter has copy, drop, store, key
+
+ + + +
+Variants + + +
+TokenBucket + + +
+Fields + + +
+
+capacity: u64 +
+
+ +
+
+current_amount: u64 +
+
+ +
+
+refill_interval: u64 +
+
+ +
+
+last_refill_timestamp: u64 +
+
+ +
+
+fractional_accumulated: u64 +
+
+ +
+
+ + +
+ +
+ +
+ + + +## Function `initialize` + + + +
public fun initialize(capacity: u64, refill_interval: u64): rate_limiter::RateLimiter
+
+ + + +
+Implementation + + +
public fun initialize(capacity: u64, refill_interval: u64): RateLimiter {
+    RateLimiter::TokenBucket {
+        capacity,
+        current_amount: capacity, // Start with a full bucket (full capacity of transactions allowed)
+        refill_interval,
+        last_refill_timestamp: timestamp::now_seconds(),
+        fractional_accumulated: 0, // Start with no fractional accumulated
+    }
+}
+
+ + + +
+ + + +## Function `request` + + + +
public fun request(limiter: &mut rate_limiter::RateLimiter, num_token_requested: u64): bool
+
+ + + +
+Implementation + + +
public fun request(limiter: &mut RateLimiter, num_token_requested: u64): bool {
+    refill(limiter);
+    if (limiter.current_amount >= num_token_requested) {
+        limiter.current_amount = limiter.current_amount - num_token_requested;
+        true
+    } else {
+        false
+    }
+}
+
+ + + +
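+A short usage sketch: keep a RateLimiter inside a resource and gate an operation on request. The wrapping module, the Gate resource, and the 10-tokens-per-60-seconds configuration are assumptions for illustration:
+
+```move
+module example::gated {
+    use aptos_framework::rate_limiter::{Self, RateLimiter};
+
+    /// Illustrative resource wrapping the limiter: at most 10 requests per 60 seconds.
+    struct Gate has key {
+        limiter: RateLimiter,
+    }
+
+    const ERATE_LIMITED: u64 = 1;
+
+    public entry fun init(owner: &signer) {
+        move_to(owner, Gate { limiter: rate_limiter::initialize(10, 60) });
+    }
+
+    public entry fun do_something(owner: address) acquires Gate {
+        let gate = borrow_global_mut<Gate>(owner);
+        // Consume one token (the limiter refills itself first); abort if the bucket is empty.
+        assert!(rate_limiter::request(&mut gate.limiter, 1), ERATE_LIMITED);
+        // ... rate-limited work goes here ...
+    }
+}
+```
+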
+ + + +## Function `refill` + + + +
fun refill(limiter: &mut rate_limiter::RateLimiter)
+
+ + + +
+Implementation + + +
fun refill(limiter: &mut RateLimiter) {
+    let current_time = timestamp::now_seconds();
+    let time_passed = current_time - limiter.last_refill_timestamp;
+    // Calculate the full tokens that can be added
+    let accumulated_amount = time_passed * limiter.capacity + limiter.fractional_accumulated;
+    let new_tokens = accumulated_amount / limiter.refill_interval;
+    if (limiter.current_amount + new_tokens >= limiter.capacity) {
+        limiter.current_amount = limiter.capacity;
+        limiter.fractional_accumulated = 0;
+    } else {
+        limiter.current_amount = limiter.current_amount + new_tokens;
+        // Update the fractional amount accumulated for the next refill cycle
+        limiter.fractional_accumulated = accumulated_amount % limiter.refill_interval;
+    };
+    limiter.last_refill_timestamp = current_time;
+}
+
+ + + +
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/aptos-framework/doc/reconfiguration.md b/aptos-move/framework/aptos-framework/doc/reconfiguration.md index 3415e0a820de1..216bf3e672a58 100644 --- a/aptos-move/framework/aptos-framework/doc/reconfiguration.md +++ b/aptos-move/framework/aptos-framework/doc/reconfiguration.md @@ -44,7 +44,6 @@ to synchronize configuration changes for the validators. use 0x1::storage_gas; use 0x1::system_addresses; use 0x1::timestamp; -use 0x1::transaction_fee;
@@ -396,20 +395,6 @@ Signal validators to start using new configuration. Must be called from friend c reconfiguration_state::on_reconfig_start(); - // Reconfiguration "forces the block" to end, as mentioned above. Therefore, we must process the collected fees - // explicitly so that staking can distribute them. - // - // This also handles the case when a validator is removed due to the governance proposal. In particular, removing - // the validator causes a reconfiguration. We explicitly process fees, i.e. we drain aggregatable coin and populate - // the fees table, prior to calling `on_new_epoch()`. That call, in turn, distributes transaction fees for all active - // and pending_inactive validators, which include any validator that is to be removed. - if (features::collect_and_distribute_gas_fees()) { - // All transactions after reconfiguration are Retry. Therefore, when the next - // block starts and tries to assign/burn collected fees it will be just 0 and - // nothing will be assigned. - transaction_fee::process_collected_fees(); - }; - // Call stake to compute the new validator set and distribute rewards and transaction fees. stake::on_new_epoch(); storage_gas::on_reconfig(); @@ -726,12 +711,10 @@ Make sure the caller is admin and check the resource DisableReconfiguration.
pragma verify = true;
 pragma verify_duration_estimate = 600;
-requires exists<stake::ValidatorFees>(@aptos_framework);
 let success = !(chain_status::is_genesis() || timestamp::spec_now_microseconds() == 0 || !reconfiguration_enabled())
     && timestamp::spec_now_microseconds() != global<Configuration>(@aptos_framework).last_reconfiguration_time;
 include features::spec_periodical_reward_rate_decrease_enabled() ==> staking_config::StakingRewardsConfigEnabledRequirement;
 include success ==> aptos_coin::ExistsAptosCoin;
-include transaction_fee::RequiresCollectedFeesPerValueLeqBlockAptosSupply;
 aborts_if false;
 ensures success ==> global<Configuration>(@aptos_framework).epoch == old(global<Configuration>(@aptos_framework).epoch) + 1;
 ensures success ==> global<Configuration>(@aptos_framework).last_reconfiguration_time == timestamp::spec_now_microseconds();
diff --git a/aptos-move/framework/aptos-framework/doc/reconfiguration_state.md b/aptos-move/framework/aptos-framework/doc/reconfiguration_state.md
index 433cf335b9499..94acdbdd4bc9e 100644
--- a/aptos-move/framework/aptos-framework/doc/reconfiguration_state.md
+++ b/aptos-move/framework/aptos-framework/doc/reconfiguration_state.md
@@ -562,7 +562,7 @@ Abort if the current state is not "in progress".
     include  copyable_any::type_name(global<State>(@aptos_framework).variant).bytes
         == b"0x1::reconfiguration_state::StateActive" ==>
     copyable_any::UnpackAbortsIf<StateActive> {
-        x:  global<State>(@aptos_framework).variant
+        self: global<State>(@aptos_framework).variant
     };
     aborts_if copyable_any::type_name(global<State>(@aptos_framework).variant).bytes
         != b"0x1::reconfiguration_state::StateActive";
diff --git a/aptos-move/framework/aptos-framework/doc/reconfiguration_with_dkg.md b/aptos-move/framework/aptos-framework/doc/reconfiguration_with_dkg.md
index 005fd6f824902..ba6bd01bb02a0 100644
--- a/aptos-move/framework/aptos-framework/doc/reconfiguration_with_dkg.md
+++ b/aptos-move/framework/aptos-framework/doc/reconfiguration_with_dkg.md
@@ -208,8 +208,6 @@ Abort if no DKG is in progress.
     requires chain_status::is_operating();
     requires exists<CoinInfo<AptosCoin>>(@aptos_framework);
     include staking_config::StakingRewardsConfigRequirement;
-    requires exists<stake::ValidatorFees>(@aptos_framework);
-    include transaction_fee::RequiresCollectedFeesPerValueLeqBlockAptosSupply;
     requires exists<features::Features>(@std);
     include config_buffer::OnNewEpochRequirement<version::Version>;
     include config_buffer::OnNewEpochRequirement<gas_schedule::GasScheduleV2>;
diff --git a/aptos-move/framework/aptos-framework/doc/resource_account.md b/aptos-move/framework/aptos-framework/doc/resource_account.md
index fa171c0b03cdc..823b41aedc6c5 100644
--- a/aptos-move/framework/aptos-framework/doc/resource_account.md
+++ b/aptos-move/framework/aptos-framework/doc/resource_account.md
@@ -363,7 +363,7 @@ the SignerCapability.
     };
 
     if (empty_container) {
-        let container = move_from(source_addr);
+        let container = move_from<Container>(source_addr);
         let Container { store } = container;
         simple_map::destroy_empty(store);
     };
@@ -467,8 +467,8 @@ the SignerCapability.
 ### Module-level Specification
 
 
-
pragma verify = true;
-pragma aborts_if_is_strict;
+
pragma verify = false;
+pragma aborts_if_is_partial;
 
@@ -486,6 +486,7 @@ the SignerCapability.
let source_addr = signer::address_of(origin);
 let resource_addr = account::spec_create_resource_address(source_addr, seed);
+let resource = create_signer::spec_create_signer(resource_addr);
 include RotateAccountAuthenticationKeyAndStoreCapabilityAbortsIfWithoutAccountLimit;
 
@@ -509,7 +510,6 @@ the SignerCapability. include aptos_account::WithdrawAbortsIf<AptosCoin>{from: origin, amount: fund_amount}; include aptos_account::GuidAbortsIf<AptosCoin>{to: resource_addr}; include RotateAccountAuthenticationKeyAndStoreCapabilityAbortsIfWithoutAccountLimit; -aborts_if coin::spec_is_account_registered<AptosCoin>(resource_addr) && coin_store_resource.frozen; // This enforces high-level requirement 3: ensures exists<aptos_framework::coin::CoinStore<AptosCoin>>(resource_addr);
@@ -547,7 +547,8 @@ the SignerCapability. -
let resource_addr = signer::address_of(resource);
+
pragma aborts_if_is_partial;
+let resource_addr = signer::address_of(resource);
 // This enforces high-level requirement 1:
 include RotateAccountAuthenticationKeyAndStoreCapabilityAbortsIf;
 // This enforces high-level requirement 2:
@@ -593,10 +594,9 @@ the SignerCapability.
     let container = global<Container>(source_addr);
     let get = len(optional_auth_key) == 0;
     let account = global<account::Account>(source_addr);
-    requires source_addr != resource_addr;
     aborts_if len(ZERO_AUTH_KEY) != 32;
-    include account::exists_at(resource_addr) ==> account::CreateResourceAccountAbortsIf;
-    include !account::exists_at(resource_addr) ==> account::CreateAccountAbortsIf {addr: resource_addr};
+    include account::spec_exists_at(resource_addr) ==> account::CreateResourceAccountAbortsIf;
+    include !account::spec_exists_at(resource_addr) ==> account::CreateAccountAbortsIf {addr: resource_addr};
     aborts_if get && !exists<account::Account>(source_addr);
     aborts_if exists<Container>(source_addr) && simple_map::spec_contains_key(container.store, resource_addr);
     aborts_if get && len(global<account::Account>(source_addr).authentication_key) != 32;
@@ -619,7 +619,8 @@ the SignerCapability.
 
 
 
-
// This enforces high-level requirement 6:
+
pragma aborts_if_is_partial;
+// This enforces high-level requirement 6:
 aborts_if !exists<Container>(source_addr);
 let resource_addr = signer::address_of(resource);
 let container = global<Container>(source_addr);
diff --git a/aptos-move/framework/aptos-framework/doc/signing_data.md b/aptos-move/framework/aptos-framework/doc/signing_data.md
new file mode 100644
index 0000000000000..2288c9d4ac9cf
--- /dev/null
+++ b/aptos-move/framework/aptos-framework/doc/signing_data.md
@@ -0,0 +1,111 @@
+
+
+
+# Module `0x1::signing_data`
+
+
+
+-  [Enum `SigningData`](#0x1_signing_data_SigningData)
+-  [Function `digest`](#0x1_signing_data_digest)
+-  [Function `authenticator`](#0x1_signing_data_authenticator)
+
+
+
+ + + + + +## Enum `SigningData` + + + +
enum SigningData has copy, drop
+
+ + + +
+Variants + + +
+V1 + + +
+Fields + + +
+
+digest: vector<u8> +
+
+ +
+
+authenticator: vector<u8> +
+
+ +
+
+ + +
+ +
+ +
+ + + +## Function `digest` + + + +
public fun digest(signing_data: &signing_data::SigningData): &vector<u8>
+
+ + + +
+Implementation + + +
public fun digest(signing_data: &SigningData): &vector<u8> {
+    &signing_data.digest
+}
+
+ + + +
+ + + +## Function `authenticator` + + + +
public fun authenticator(signing_data: &signing_data::SigningData): &vector<u8>
+
+ + + +
+Implementation + + +
public fun authenticator(signing_data: &SigningData): &vector<u8> {
+    &signing_data.authenticator
+}
+
+ + + +
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/aptos-framework/doc/solana_derivable_account.md b/aptos-move/framework/aptos-framework/doc/solana_derivable_account.md new file mode 100644 index 0000000000000..b05716c460ad3 --- /dev/null +++ b/aptos-move/framework/aptos-framework/doc/solana_derivable_account.md @@ -0,0 +1,493 @@ + + + +# Module `0x1::solana_derivable_account` + +Derivable account abstraction that verifies a message signed by +SIWS. +1. The message format is as follows: + + wants you to sign in with your Solana account: + + +Please confirm you explicitly initiated this request from . You are approving to execute transaction on Aptos blockchain (). + +Nonce: + +2. The abstract public key is a BCS serialized SIWSAbstractPublicKey. +3. The abstract signature is a BCS serialized SIWSAbstractSignature. +4. This module has been tested for the following wallets: +- Phantom +- Solflare +- Backpack +- OKX + + +- [Enum `SIWSAbstractSignature`](#0x1_solana_derivable_account_SIWSAbstractSignature) +- [Constants](#@Constants_0) +- [Function `deserialize_abstract_public_key`](#0x1_solana_derivable_account_deserialize_abstract_public_key) +- [Function `deserialize_abstract_signature`](#0x1_solana_derivable_account_deserialize_abstract_signature) +- [Function `construct_message`](#0x1_solana_derivable_account_construct_message) +- [Function `to_public_key_bytes`](#0x1_solana_derivable_account_to_public_key_bytes) +- [Function `authenticate_auth_data`](#0x1_solana_derivable_account_authenticate_auth_data) +- [Function `authenticate`](#0x1_solana_derivable_account_authenticate) +- [Specification](#@Specification_1) + - [Function `to_public_key_bytes`](#@Specification_1_to_public_key_bytes) + - [Function `authenticate_auth_data`](#@Specification_1_authenticate_auth_data) + - [Function `authenticate`](#@Specification_1_authenticate) + + +
use 0x1::auth_data;
+use 0x1::bcs_stream;
+use 0x1::common_account_abstractions_utils;
+use 0x1::ed25519;
+use 0x1::option;
+use 0x1::string;
+use 0x1::string_utils;
+use 0x1::transaction_context;
+use 0x1::vector;
+
+
+## Enum `SIWSAbstractSignature`
+
enum SIWSAbstractSignature has drop
+
+
+Variants
+
+MessageV1
+
+Fields
+
+signature: vector<u8>
+
+
+## Constants
+
const PUBLIC_KEY_NUM_BYTES: u64 = 32;
+
+
+Invalid public key.
+
const EINVALID_PUBLIC_KEY: u64 = 5;
+
+
+Signature failed to verify.
+
const EINVALID_SIGNATURE: u64 = 1;
+
+
+Invalid signature type.
+
const EINVALID_SIGNATURE_TYPE: u64 = 4;
+
+
+Entry function payload is missing.
+
const EMISSING_ENTRY_FUNCTION_PAYLOAD: u64 = 3;
+
+
const BASE_58_ALPHABET: vector<u8> = [49, 50, 51, 52, 53, 54, 55, 56, 57, 65, 66, 67, 68, 69, 70, 71, 72, 74, 75, 76, 77, 78, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122];
+
+
+Non base58 character found in public key.
+
const EINVALID_BASE_58_PUBLIC_KEY: u64 = 2;
+
+
+Invalid public key length.
+
const EINVALID_PUBLIC_KEY_LENGTH: u64 = 6;
+
+
const HEX_ALPHABET: vector<u8> = [48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 97, 98, 99, 100, 101, 102];
+
+
+## Function `deserialize_abstract_public_key`
+
+Deserializes the abstract public key, which is expected to be a BCS-serialized
+SIWSAbstractPublicKey. The base58_public_key is represented in UTF-8. We prefer
+this format because it is computationally cheaper to decode a base58 string than
+to encode raw bytes into one. We need both the UTF-8 base58 public key (to construct
+the message) and the raw bytes version (to perform signature verification).
+
fun deserialize_abstract_public_key(abstract_public_key: &vector<u8>): (vector<u8>, vector<u8>)
+
+
+Implementation
+
fun deserialize_abstract_public_key(abstract_public_key: &vector<u8>):
+(vector<u8>, vector<u8>) {
+    let stream = bcs_stream::new(*abstract_public_key);
+    let base58_public_key = bcs_stream::deserialize_vector<u8>(&mut stream, |x| deserialize_u8(x));
+    let domain = bcs_stream::deserialize_vector<u8>(&mut stream, |x| deserialize_u8(x));
+    (base58_public_key, domain)
+}
+
+
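The layout this function reads can be reproduced off-chain. Below is a minimal sketch, assuming a hypothetical example module (`0xcafe::siws_public_key_example`) and a local mirror of the framework's `SIWSAbstractPublicKey` layout; it is illustrative, not framework code:

```move
module 0xcafe::siws_public_key_example {
    use std::bcs;

    /// Assumed mirror of SIWSAbstractPublicKey: the base58 public key as
    /// UTF-8 bytes, followed by the dapp domain.
    struct SIWSAbstractPublicKey has drop {
        base58_public_key: vector<u8>,
        domain: vector<u8>,
    }

    public fun make_abstract_public_key(
        base58_public_key: vector<u8>,
        domain: vector<u8>,
    ): vector<u8> {
        // BCS writes each vector<u8> as a ULEB128 length followed by its bytes,
        // which is exactly what the two deserialize_vector calls above read back.
        bcs::to_bytes(&SIWSAbstractPublicKey { base58_public_key, domain })
    }
}
```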
+
+
+## Function `deserialize_abstract_signature`
+
+Deserializes the abstract signature, which is expected to be a BCS-serialized
+SIWSAbstractSignature, and aborts if the signature type tag is unknown.
+
fun deserialize_abstract_signature(abstract_signature: &vector<u8>): solana_derivable_account::SIWSAbstractSignature
+
+
+Implementation
+
fun deserialize_abstract_signature(abstract_signature: &vector<u8>): SIWSAbstractSignature {
+    let stream = bcs_stream::new(*abstract_signature);
+    let signature_type = bcs_stream::deserialize_u8(&mut stream);
+    if (signature_type == 0x00) {
+        let signature = bcs_stream::deserialize_vector<u8>(&mut stream, |x| deserialize_u8(x));
+        SIWSAbstractSignature::MessageV1 { signature }
+    } else {
+        abort(EINVALID_SIGNATURE_TYPE)
+    }
+}
+
+
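For the signature side, a minimal sketch of the byte layout the MessageV1 arm above accepts (hypothetical example module, not framework code): a one-byte variant tag `0x00` followed by the BCS-encoded Ed25519 signature bytes.

```move
module 0xcafe::siws_signature_example {
    use std::bcs;

    /// Builds an abstract_signature blob that deserialize_abstract_signature
    /// would decode into SIWSAbstractSignature::MessageV1 { signature: sig }.
    public fun make_message_v1_signature(sig: vector<u8>): vector<u8> {
        let bytes = vector[0u8];           // variant tag 0x00 selects MessageV1
        bytes.append(bcs::to_bytes(&sig)); // ULEB128 length, then the signature bytes
        bytes
    }
}
```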
+
+
+## Function `construct_message`
+
fun construct_message(base58_public_key: &vector<u8>, domain: &vector<u8>, entry_function_name: &vector<u8>, digest_utf8: &vector<u8>): vector<u8>
+
+
+Implementation
+
fun construct_message(
+    base58_public_key: &vector<u8>,
+    domain: &vector<u8>,
+    entry_function_name: &vector<u8>,
+    digest_utf8: &vector<u8>,
+): vector<u8> {
+    let message = &mut vector[];
+    message.append(*domain);
+    message.append(b" wants you to sign in with your Solana account:\n");
+    message.append(*base58_public_key);
+    message.append(b"\n\nPlease confirm you explicitly initiated this request from ");
+    message.append(*domain);
+    message.append(b".");
+    message.append(b" You are approving to execute transaction ");
+    message.append(*entry_function_name);
+    message.append(b" on Aptos blockchain");
+    let network_name = network_name();
+    message.append(b" (");
+    message.append(network_name);
+    message.append(b")");
+    message.append(b".");
+    message.append(b"\n\nNonce: ");
+    message.append(*digest_utf8);
+    *message
+}
+
+
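For orientation, a message assembled by the function above would look roughly like the following; every concrete value here (domain, public key, entry function, network, nonce) is a made-up placeholder, not something taken from the module:

```
example.com wants you to sign in with your Solana account:
7xKXtg2CW87d97TXJSDpbD5jBkheTqA83TZRuJosgAsU

Please confirm you explicitly initiated this request from example.com. You are approving to execute transaction 0x1::aptos_account::transfer on Aptos blockchain (mainnet).

Nonce: 0x1b2e...
```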
+
+
+## Function `to_public_key_bytes`
+
fun to_public_key_bytes(base58_public_key: &vector<u8>): vector<u8>
+
+
+Implementation
+
fun to_public_key_bytes(base58_public_key: &vector<u8>): vector<u8> {
+    let bytes = vector[0u8];
+    let base = 58u16;
+
+    let i = 0;
+    while (i < base58_public_key.length()) {
+        let char = base58_public_key[i];
+        let (found, char_index) = BASE_58_ALPHABET.index_of(&char);
+        assert!(found, EINVALID_BASE_58_PUBLIC_KEY);
+
+        let j = 0;
+        let carry = (char_index as u16);
+
+        // For each existing byte, multiply by 58 and add carry
+        while (j < bytes.length()) {
+            let current = (bytes[j] as u16);
+            let new_carry = current * base + carry;
+            bytes[j] = ((new_carry & 0xff) as u8);
+            carry = new_carry >> 8;
+            j = j + 1;
+        };
+
+        // Add any remaining carry as new bytes
+        while (carry > 0) {
+            bytes.push_back((carry & 0xff) as u8);
+            carry = carry >> 8;
+        };
+
+        i = i + 1;
+    };
+
+    // Handle leading zeros (1's in Base58)
+    let i = 0;
+    while (i < base58_public_key.length() && base58_public_key[i] == 49) { // '1' is 49 in ASCII
+        bytes.push_back(0);
+        i = i + 1;
+    };
+
+    vector::reverse(&mut bytes);
+    assert!(bytes.length() == PUBLIC_KEY_NUM_BYTES, EINVALID_PUBLIC_KEY_LENGTH);
+    bytes
+}
+
+
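The digit loop above is easier to follow in isolation: each base58 character multiplies the little-endian accumulator by 58, adds the character's alphabet index, and spills any carry into new bytes. For example, for the two-character input "21" ('2' has index 1, '1' has index 0) the accumulator goes [0] -> [1] -> [58]. A standalone sketch of one step (hypothetical example module, not framework code):

```move
module 0xcafe::base58_digit_example {
    /// One step of the accumulation used by to_public_key_bytes:
    /// bytes := bytes * 58 + digit_index (digit_index is expected to be < 58).
    public fun push_digit(bytes: vector<u8>, digit_index: u16): vector<u8> {
        let carry = digit_index;
        let j = 0;
        while (j < bytes.length()) {
            let new_carry = (bytes[j] as u16) * 58 + carry;
            bytes[j] = ((new_carry & 0xff) as u8);
            carry = new_carry >> 8;
            j = j + 1;
        };
        // Any remaining carry becomes additional, more significant bytes.
        while (carry > 0) {
            bytes.push_back((carry & 0xff) as u8);
            carry = carry >> 8;
        };
        bytes
    }
}
```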
+
+
+## Function `authenticate_auth_data`
+
fun authenticate_auth_data(aa_auth_data: auth_data::AbstractionAuthData, entry_function_name: &vector<u8>)
+
+
+Implementation
+
fun authenticate_auth_data(
+    aa_auth_data: AbstractionAuthData,
+    entry_function_name: &vector<u8>
+) {
+    let abstract_public_key = aa_auth_data.derivable_abstract_public_key();
+    let (base58_public_key, domain) = deserialize_abstract_public_key(abstract_public_key);
+    let digest_utf8 = string_utils::to_string(aa_auth_data.digest()).bytes();
+
+    let public_key_bytes = to_public_key_bytes(&base58_public_key);
+    let public_key = new_validated_public_key_from_bytes(public_key_bytes);
+    assert!(public_key.is_some(), EINVALID_PUBLIC_KEY);
+    let abstract_signature = deserialize_abstract_signature(aa_auth_data.derivable_abstract_signature());
+    match (abstract_signature) {
+        SIWSAbstractSignature::MessageV1 { signature: signature_bytes } => {
+            let message = construct_message(&base58_public_key, &domain, entry_function_name, digest_utf8);
+
+            let signature = new_signature_from_bytes(signature_bytes);
+            assert!(
+                ed25519::signature_verify_strict(
+                    &signature,
+                    &public_key_into_unvalidated(public_key.destroy_some()),
+                    message,
+                ),
+                EINVALID_SIGNATURE
+            );
+        },
+    };
+}
+
+
+
+## Function `authenticate`
+
+Authorization function for domain account abstraction.
+
public fun authenticate(account: signer, aa_auth_data: auth_data::AbstractionAuthData): signer
+
+
+Implementation
+
public fun authenticate(account: signer, aa_auth_data: AbstractionAuthData): signer {
+    let maybe_entry_function_payload = transaction_context::entry_function_payload();
+    if (maybe_entry_function_payload.is_some()) {
+        let entry_function_payload = maybe_entry_function_payload.destroy_some();
+        let entry_function_name = entry_function_name(&entry_function_payload);
+        authenticate_auth_data(aa_auth_data, &entry_function_name);
+        account
+    } else {
+        abort(EMISSING_ENTRY_FUNCTION_PAYLOAD)
+    }
+}
+
+
+
+## Specification
+
+
+### Function `to_public_key_bytes`
+
fun to_public_key_bytes(base58_public_key: &vector<u8>): vector<u8>
+
+
ensures result.length() == PUBLIC_KEY_NUM_BYTES;
+
+
+
+### Function `authenticate_auth_data`
+
fun authenticate_auth_data(aa_auth_data: auth_data::AbstractionAuthData, entry_function_name: &vector<u8>)
+
+
pragma verify = false;
+
+
+
+### Function `authenticate`
+
public fun authenticate(account: signer, aa_auth_data: auth_data::AbstractionAuthData): signer
+
+
pragma verify = false;
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/aptos-framework/doc/stake.md b/aptos-move/framework/aptos-framework/doc/stake.md index 5a199ef91f69d..564db5d3f1434 100644 --- a/aptos-move/framework/aptos-framework/doc/stake.md +++ b/aptos-move/framework/aptos-framework/doc/stake.md @@ -33,6 +33,7 @@ or if their stake drops below the min required, they would get removed at the en - [Struct `IndividualValidatorPerformance`](#0x1_stake_IndividualValidatorPerformance) - [Resource `ValidatorPerformance`](#0x1_stake_ValidatorPerformance) - [Struct `RegisterValidatorCandidateEvent`](#0x1_stake_RegisterValidatorCandidateEvent) +- [Struct `StakeManagementPermission`](#0x1_stake_StakeManagementPermission) - [Struct `RegisterValidatorCandidate`](#0x1_stake_RegisterValidatorCandidate) - [Struct `SetOperatorEvent`](#0x1_stake_SetOperatorEvent) - [Struct `SetOperator`](#0x1_stake_SetOperator) @@ -63,8 +64,8 @@ or if their stake drops below the min required, they would get removed at the en - [Resource `Ghost$ghost_active_num`](#0x1_stake_Ghost$ghost_active_num) - [Resource `Ghost$ghost_pending_inactive_num`](#0x1_stake_Ghost$ghost_pending_inactive_num) - [Constants](#@Constants_0) -- [Function `initialize_validator_fees`](#0x1_stake_initialize_validator_fees) -- [Function `add_transaction_fee`](#0x1_stake_add_transaction_fee) +- [Function `check_stake_permission`](#0x1_stake_check_stake_permission) +- [Function `grant_permission`](#0x1_stake_grant_permission) - [Function `get_lockup_secs`](#0x1_stake_get_lockup_secs) - [Function `get_remaining_lockup_secs`](#0x1_stake_get_remaining_lockup_secs) - [Function `get_stake`](#0x1_stake_get_stake) @@ -130,8 +131,6 @@ or if their stake drops below the min required, they would get removed at the en - [High-level Requirements](#high-level-req) - [Module-level Specification](#module-level-spec) - [Resource `ValidatorSet`](#@Specification_1_ValidatorSet) - - [Function `initialize_validator_fees`](#@Specification_1_initialize_validator_fees) - - [Function `add_transaction_fee`](#@Specification_1_add_transaction_fee) - [Function `get_validator_state`](#@Specification_1_get_validator_state) - [Function `initialize`](#@Specification_1_initialize) - [Function `remove_validators`](#@Specification_1_remove_validators) @@ -179,6 +178,7 @@ or if their stake drops below the min required, they would get removed at the en use 0x1::fixed_point64; use 0x1::math64; use 0x1::option; +use 0x1::permissioned_signer; use 0x1::reconfiguration_state; use 0x1::signer; use 0x1::staking_config; @@ -628,6 +628,33 @@ This allows the Stake module to mint rewards to stakers. + + + + +## Struct `StakeManagementPermission` + + + +
struct StakeManagementPermission has copy, drop, store
+
+
+Fields
+
+dummy_field: bool
+
@@ -1447,11 +1474,11 @@ This allows the Stake module to mint rewards to stakers.
 
 ## Resource `ValidatorFees`
 
-Stores transaction fees assigned to validators. All fees are distributed to validators
-at the end of the epoch.
+DEPRECATED
 
 
-
struct ValidatorFees has key
+
#[deprecated]
+struct ValidatorFees has key
 
@@ -1734,6 +1761,16 @@ Validators cannot join or leave post genesis on this test network. + + +Signer does not have permission to perform stake logic. + + +
const ENO_STAKE_PERMISSION: u64 = 28;
+
+ + + An account cannot own more than one owner capability. @@ -1756,7 +1793,7 @@ Owner capability does not exist at the provided account. -Validator set change temporarily disabled because of in-progress reconfiguration. +Validator set change temporarily disabled because of in-progress reconfiguration. Please retry after 1 minute.
const ERECONFIGURATION_IN_PROGRESS: u64 = 20;
@@ -1882,15 +1919,14 @@ Validator status enum. We can switch to proper enum later once Move supports it.
 
 
 
-
+
 
-## Function `initialize_validator_fees`
+## Function `check_stake_permission`
 
-Initializes the resource storing information about collected transaction fees per validator.
-Used by transaction_fee.move to initialize fee collection and distribution.
+Permissions
 
 
-
public(friend) fun initialize_validator_fees(aptos_framework: &signer)
+
fun check_stake_permission(s: &signer)
 
@@ -1899,13 +1935,11 @@ Used by transaction_fee
 
 Implementation
 
-
public(friend) fun initialize_validator_fees(aptos_framework: &signer) {
-    system_addresses::assert_aptos_framework(aptos_framework);
+
inline fun check_stake_permission(s: &signer) {
     assert!(
-        !exists<ValidatorFees>(@aptos_framework),
-        error::already_exists(EFEES_TABLE_ALREADY_EXISTS)
+        permissioned_signer::check_permission_exists(s, StakeManagementPermission {}),
+        error::permission_denied(ENO_STAKE_PERMISSION),
     );
-    move_to(aptos_framework, ValidatorFees { fees_table: table::new() });
 }
 
@@ -1913,14 +1947,14 @@ Used by transaction_fee
 
 
-
+
 
-## Function `add_transaction_fee`
+## Function `grant_permission`
 
-Stores the transaction fee collected to the specified validator address.
+Grant permission to mutate staking on behalf of the master signer.
 
 
-
public(friend) fun add_transaction_fee(validator_addr: address, fee: coin::Coin<aptos_coin::AptosCoin>)
+
public fun grant_permission(master: &signer, permissioned_signer: &signer)
 
@@ -1929,14 +1963,8 @@ Stores the transaction fee collected to the specified validator address.
 
 Implementation
 
-
public(friend) fun add_transaction_fee(validator_addr: address, fee: Coin<AptosCoin>) acquires ValidatorFees {
-    let fees_table = &mut borrow_global_mut<ValidatorFees>(@aptos_framework).fees_table;
-    if (table::contains(fees_table, validator_addr)) {
-        let collected_fee = table::borrow_mut(fees_table, validator_addr);
-        coin::merge(collected_fee, fee);
-    } else {
-        table::add(fees_table, validator_addr, fee);
-    }
+
public fun grant_permission(master: &signer, permissioned_signer: &signer) {
+    permissioned_signer::authorize_unlimited(master, permissioned_signer, StakeManagementPermission {})
 }
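A minimal usage sketch of the pair above (hypothetical example module, not framework code): the master account authorizes a permissioned signer once, after which that signer satisfies the `check_stake_permission` guard that the entry points below now include.

```move
module 0xcafe::stake_delegation_example {
    use aptos_framework::stake;

    /// `master` is the stake owner account; `permissioned` is assumed to be a
    /// permissioned signer previously derived from it via 0x1::permissioned_signer.
    public fun delegate_stake_management(master: &signer, permissioned: &signer) {
        // Grants StakeManagementPermission, so stake entry points guarded by
        // check_stake_permission accept `permissioned` on behalf of `master`.
        stake::grant_permission(master, permissioned);
    }
}
```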
 
@@ -2451,6 +2479,7 @@ to set later. operator: address, voter: address, ) acquires AllowedValidators, OwnerCapability, StakePool, ValidatorSet { + check_stake_permission(owner); initialize_owner(owner); move_to(owner, ValidatorConfig { consensus_pubkey: vector::empty(), @@ -2500,8 +2529,9 @@ Initialize the validator account and give ownership to the signing account. network_addresses: vector<u8>, fullnode_addresses: vector<u8>, ) acquires AllowedValidators { + check_stake_permission(account); // Checks the public key has a valid proof-of-possession to prevent rogue-key attacks. - let pubkey_from_pop = &mut bls12381::public_key_from_bytes_with_pop( + let pubkey_from_pop = &bls12381::public_key_from_bytes_with_pop( consensus_pubkey, &proof_of_possession_from_bytes(proof_of_possession) ); @@ -2537,6 +2567,7 @@ Initialize the validator account and give ownership to the signing account.
fun initialize_owner(owner: &signer) acquires AllowedValidators {
+    check_stake_permission(owner);
     let owner_address = signer::address_of(owner);
     assert!(is_allowed(owner_address), error::not_found(EINELIGIBLE_VALIDATOR));
     assert!(!stake_pool_exists(owner_address), error::already_exists(EALREADY_REGISTERED));
@@ -2591,6 +2622,7 @@ Extract and return owner capability from the signing account.
 
 
 
public fun extract_owner_cap(owner: &signer): OwnerCapability acquires OwnerCapability {
+    check_stake_permission(owner);
     let owner_address = signer::address_of(owner);
     assert_owner_cap_exists(owner_address);
     move_from<OwnerCapability>(owner_address)
@@ -2619,6 +2651,7 @@ staking pool.
 
 
 
public fun deposit_owner_cap(owner: &signer, owner_cap: OwnerCapability) {
+    check_stake_permission(owner);
     assert!(!exists<OwnerCapability>(signer::address_of(owner)), error::not_found(EOWNER_CAP_ALREADY_EXISTS));
     move_to(owner, owner_cap);
 }
@@ -2670,6 +2703,7 @@ Allows an owner to change the operator of the stake pool.
 
 
 
public entry fun set_operator(owner: &signer, new_operator: address) acquires OwnerCapability, StakePool {
+    check_stake_permission(owner);
     let owner_address = signer::address_of(owner);
     assert_owner_cap_exists(owner_address);
     let ownership_cap = borrow_global<OwnerCapability>(owner_address);
@@ -2712,16 +2746,16 @@ Allows an account with ownership capability to change the operator of the stake
                 new_operator,
             },
         );
+    } else {
+        event::emit_event(
+            &mut stake_pool.set_operator_events,
+            SetOperatorEvent {
+                pool_address,
+                old_operator,
+                new_operator,
+            },
+        );
     };
-
-    event::emit_event(
-        &mut stake_pool.set_operator_events,
-        SetOperatorEvent {
-            pool_address,
-            old_operator,
-            new_operator,
-        },
-    );
 }
 
@@ -2746,6 +2780,7 @@ Allows an owner to change the delegated voter of the stake pool.
public entry fun set_delegated_voter(owner: &signer, new_voter: address) acquires OwnerCapability, StakePool {
+    check_stake_permission(owner);
     let owner_address = signer::address_of(owner);
     assert_owner_cap_exists(owner_address);
     let ownership_cap = borrow_global<OwnerCapability>(owner_address);
@@ -2802,6 +2837,7 @@ Add amount of coins from the public entry fun add_stake(owner: &signer, amount: u64) acquires OwnerCapability, StakePool, ValidatorSet {
+    check_stake_permission(owner);
     let owner_address = signer::address_of(owner);
     assert_owner_cap_exists(owner_address);
     let ownership_cap = borrow_global<OwnerCapability>(owner_address);
@@ -2843,7 +2879,7 @@ Add coins into pool_address. this requires the corresp
     // Only track and validate voting power increase for active and pending_active validator.
     // Pending_inactive validator will be removed from the validator set in the next epoch.
     // Inactive validator's total stake will be tracked when they join the validator set.
-    let validator_set = borrow_global_mut<ValidatorSet>(@aptos_framework);
+    let validator_set = borrow_global<ValidatorSet>(@aptos_framework);
     // Search directly rather using get_validator_state to save on unnecessary loops.
     if (option::is_some(&find_validator(&validator_set.active_validators, pool_address)) ||
         option::is_some(&find_validator(&validator_set.pending_active, pool_address))) {
@@ -2870,14 +2906,15 @@ Add coins into pool_address. this requires the corresp
                 amount_added: amount,
             },
         );
+    } else {
+        event::emit_event(
+            &mut stake_pool.add_stake_events,
+            AddStakeEvent {
+                pool_address,
+                amount_added: amount,
+            },
+        );
     };
-    event::emit_event(
-        &mut stake_pool.add_stake_events,
-        AddStakeEvent {
-            pool_address,
-            amount_added: amount,
-        },
-    );
 }
 
@@ -2902,6 +2939,7 @@ Move amount of coins from pending_inactive to active.
public entry fun reactivate_stake(owner: &signer, amount: u64) acquires OwnerCapability, StakePool {
+    check_stake_permission(owner);
     assert_reconfig_not_in_progress();
     let owner_address = signer::address_of(owner);
     assert_owner_cap_exists(owner_address);
@@ -2952,14 +2990,15 @@ Move amount of coins from pending_inactive to active.
                 amount,
             },
         );
+    } else {
+        event::emit_event(
+            &mut stake_pool.reactivate_stake_events,
+            ReactivateStakeEvent {
+                pool_address,
+                amount,
+            },
+        );
     };
-    event::emit_event(
-        &mut stake_pool.reactivate_stake_events,
-        ReactivateStakeEvent {
-            pool_address,
-            amount,
-        },
-    );
 }
 
@@ -2989,6 +3028,7 @@ Rotate the consensus key of the validator, it'll take effect in next epoch. new_consensus_pubkey: vector<u8>, proof_of_possession: vector<u8>, ) acquires StakePool, ValidatorConfig { + check_stake_permission(operator); assert_reconfig_not_in_progress(); assert_stake_pool_exists(pool_address); @@ -2999,7 +3039,7 @@ Rotate the consensus key of the validator, it'll take effect in next epoch. let validator_info = borrow_global_mut<ValidatorConfig>(pool_address); let old_consensus_pubkey = validator_info.consensus_pubkey; // Checks the public key has a valid proof-of-possession to prevent rogue-key attacks. - let pubkey_from_pop = &mut bls12381::public_key_from_bytes_with_pop( + let pubkey_from_pop = &bls12381::public_key_from_bytes_with_pop( new_consensus_pubkey, &proof_of_possession_from_bytes(proof_of_possession) ); @@ -3014,15 +3054,16 @@ Rotate the consensus key of the validator, it'll take effect in next epoch. new_consensus_pubkey, }, ); + } else { + event::emit_event( + &mut stake_pool.rotate_consensus_key_events, + RotateConsensusKeyEvent { + pool_address, + old_consensus_pubkey, + new_consensus_pubkey, + }, + ); }; - event::emit_event( - &mut stake_pool.rotate_consensus_key_events, - RotateConsensusKeyEvent { - pool_address, - old_consensus_pubkey, - new_consensus_pubkey, - }, - ); }
@@ -3052,6 +3093,7 @@ Update the network and full node addresses of the validator. This only takes eff new_network_addresses: vector<u8>, new_fullnode_addresses: vector<u8>, ) acquires StakePool, ValidatorConfig { + check_stake_permission(operator); assert_reconfig_not_in_progress(); assert_stake_pool_exists(pool_address); let stake_pool = borrow_global_mut<StakePool>(pool_address); @@ -3073,18 +3115,18 @@ Update the network and full node addresses of the validator. This only takes eff new_fullnode_addresses, }, ); + } else { + event::emit_event( + &mut stake_pool.update_network_and_fullnode_addresses_events, + UpdateNetworkAndFullnodeAddressesEvent { + pool_address, + old_network_addresses, + new_network_addresses, + old_fullnode_addresses, + new_fullnode_addresses, + }, + ); }; - event::emit_event( - &mut stake_pool.update_network_and_fullnode_addresses_events, - UpdateNetworkAndFullnodeAddressesEvent { - pool_address, - old_network_addresses, - new_network_addresses, - old_fullnode_addresses, - new_fullnode_addresses, - }, - ); - }
@@ -3109,6 +3151,7 @@ Similar to increase_lockup_with_cap but will use ownership capability from the s
public entry fun increase_lockup(owner: &signer) acquires OwnerCapability, StakePool {
+    check_stake_permission(owner);
     let owner_address = signer::address_of(owner);
     assert_owner_cap_exists(owner_address);
     let ownership_cap = borrow_global<OwnerCapability>(owner_address);
@@ -3156,15 +3199,16 @@ directly inactive if it's not from an active validator.
                 new_locked_until_secs,
             },
         );
-    };
-    event::emit_event(
-        &mut stake_pool.increase_lockup_events,
-        IncreaseLockupEvent {
-            pool_address,
-            old_locked_until_secs,
-            new_locked_until_secs,
-        },
-    );
+    } else {
+        event::emit_event(
+            &mut stake_pool.increase_lockup_events,
+            IncreaseLockupEvent {
+                pool_address,
+                old_locked_until_secs,
+                new_locked_until_secs,
+            },
+        );
+    }
 }
 
@@ -3192,6 +3236,7 @@ This can only called by the operator of the validator/staking pool. operator: &signer, pool_address: address ) acquires StakePool, ValidatorConfig, ValidatorSet { + check_stake_permission(operator); assert!( staking_config::get_allow_validator_set_change(&staking_config::get()), error::invalid_argument(ENO_POST_GENESIS_VALIDATOR_SET_CHANGE_ALLOWED), @@ -3249,7 +3294,7 @@ This internal version can only be called by the Genesis module during Genesis. update_voting_power_increase(voting_power); // Add validator to pending_active, to be activated in the next epoch. - let validator_config = borrow_global_mut<ValidatorConfig>(pool_address); + let validator_config = borrow_global<ValidatorConfig>(pool_address); assert!(!vector::is_empty(&validator_config.consensus_pubkey), error::invalid_argument(EINVALID_PUBLIC_KEY)); // Validate the current validator set size has not exceeded the limit. @@ -3265,11 +3310,12 @@ This internal version can only be called by the Genesis module during Genesis. if (std::features::module_event_migration_enabled()) { event::emit(JoinValidatorSet { pool_address }); - }; - event::emit_event( - &mut stake_pool.join_validator_set_events, - JoinValidatorSetEvent { pool_address }, - ); + } else { + event::emit_event( + &mut stake_pool.join_validator_set_events, + JoinValidatorSetEvent { pool_address }, + ); + } }
@@ -3294,6 +3340,7 @@ Similar to unlock_with_cap but will use ownership capability from the signing ac
public entry fun unlock(owner: &signer, amount: u64) acquires OwnerCapability, StakePool {
+    check_stake_permission(owner);
     assert_reconfig_not_in_progress();
     let owner_address = signer::address_of(owner);
     assert_owner_cap_exists(owner_address);
@@ -3346,14 +3393,15 @@ Unlock amount from the active stake. Only possible if the lockup ha
                 amount_unlocked: amount,
             },
         );
+    } else {
+        event::emit_event(
+            &mut stake_pool.unlock_stake_events,
+            UnlockStakeEvent {
+                pool_address,
+                amount_unlocked: amount,
+            },
+        );
     };
-    event::emit_event(
-        &mut stake_pool.unlock_stake_events,
-        UnlockStakeEvent {
-            pool_address,
-            amount_unlocked: amount,
-        },
-    );
 }
 
@@ -3381,6 +3429,7 @@ Withdraw from account's inacti owner: &signer, withdraw_amount: u64 ) acquires OwnerCapability, StakePool, ValidatorSet { + check_stake_permission(owner); let owner_address = signer::address_of(owner); assert_owner_cap_exists(owner_address); let ownership_cap = borrow_global<OwnerCapability>(owner_address); @@ -3437,14 +3486,15 @@ Withdraw from pool_address's inactive stake with the corresponding amount_withdrawn: withdraw_amount, }, ); + } else { + event::emit_event( + &mut stake_pool.withdraw_stake_events, + WithdrawStakeEvent { + pool_address, + amount_withdrawn: withdraw_amount, + }, + ); }; - event::emit_event( - &mut stake_pool.withdraw_stake_events, - WithdrawStakeEvent { - pool_address, - amount_withdrawn: withdraw_amount, - }, - ); coin::extract(&mut stake_pool.inactive, withdraw_amount) } @@ -3479,6 +3529,7 @@ Can only be called by the operator of the validator/staking pool. operator: &signer, pool_address: address ) acquires StakePool, ValidatorSet { + check_stake_permission(operator); assert_reconfig_not_in_progress(); let config = staking_config::get(); assert!( @@ -3521,13 +3572,14 @@ Can only be called by the operator of the validator/staking pool. if (std::features::module_event_migration_enabled()) { event::emit(LeaveValidatorSet { pool_address }); + } else { + event::emit_event( + &mut stake_pool.leave_validator_set_events, + LeaveValidatorSetEvent { + pool_address, + }, + ); }; - event::emit_event( - &mut stake_pool.leave_validator_set_events, - LeaveValidatorSetEvent { - pool_address, - }, - ); }; }
@@ -3665,7 +3717,7 @@ power.
public(friend) fun on_new_epoch(
-) acquires StakePool, AptosCoinCapabilities, ValidatorConfig, ValidatorPerformance, ValidatorSet, ValidatorFees {
+) acquires StakePool, AptosCoinCapabilities, ValidatorConfig, ValidatorPerformance, ValidatorSet {
     let validator_set = borrow_global_mut<ValidatorSet>(@aptos_framework);
     let config = staking_config::get();
     let validator_perf = borrow_global_mut<ValidatorPerformance>(@aptos_framework);
@@ -3706,8 +3758,8 @@ power.
     }) {
         let old_validator_info = vector::borrow_mut(&mut validator_set.active_validators, i);
         let pool_address = old_validator_info.addr;
-        let validator_config = borrow_global_mut<ValidatorConfig>(pool_address);
-        let stake_pool = borrow_global_mut<StakePool>(pool_address);
+        let validator_config = borrow_global<ValidatorConfig>(pool_address);
+        let stake_pool = borrow_global<StakePool>(pool_address);
         let new_validator_info = generate_validator_info(pool_address, stake_pool, *validator_config);
 
         // A validator needs at least the min stake required to join the validator set.
@@ -3826,7 +3878,7 @@ Return the ValidatorConsensusInfo of each current validator, sorted
 Implementation
 
 
-
public fun next_validator_consensus_infos(): vector<ValidatorConsensusInfo> acquires ValidatorSet, ValidatorPerformance, StakePool, ValidatorFees, ValidatorConfig {
+
public fun next_validator_consensus_infos(): vector<ValidatorConsensusInfo> acquires ValidatorSet, ValidatorPerformance, StakePool, ValidatorConfig {
     // Init.
     let cur_validator_set = borrow_global<ValidatorSet>(@aptos_framework);
     let staking_config = staking_config::get();
@@ -3881,25 +3933,16 @@ Return the ValidatorConsensusInfo of each current validator, sorted
             0
         };
 
-        let cur_fee = 0;
-        if (features::collect_and_distribute_gas_fees()) {
-            let fees_table = &borrow_global<ValidatorFees>(@aptos_framework).fees_table;
-            if (table::contains(fees_table, candidate.addr)) {
-                let fee_coin = table::borrow(fees_table, candidate.addr);
-                cur_fee = coin::value(fee_coin);
-            }
-        };
-
         let lockup_expired = get_reconfig_start_time_secs() >= stake_pool.locked_until_secs;
         spec {
-            assume cur_active + cur_pending_active + cur_reward + cur_fee <= MAX_U64;
-            assume cur_active + cur_pending_inactive + cur_pending_active + cur_reward + cur_fee <= MAX_U64;
+            assume cur_active + cur_pending_active + cur_reward <= MAX_U64;
+            assume cur_active + cur_pending_inactive + cur_pending_active + cur_reward <= MAX_U64;
         };
         let new_voting_power =
             cur_active
             + if (lockup_expired) { 0 } else { cur_pending_inactive }
             + cur_pending_active
-            + cur_reward + cur_fee;
+            + cur_reward;
 
         if (new_voting_power >= minimum_stake) {
             let config = *borrow_global<ValidatorConfig>(candidate.addr);
@@ -4078,7 +4121,7 @@ This function shouldn't abort.
     validator_perf: &ValidatorPerformance,
     pool_address: address,
     staking_config: &StakingConfig,
-) acquires StakePool, AptosCoinCapabilities, ValidatorConfig, ValidatorFees {
+) acquires StakePool, AptosCoinCapabilities, ValidatorConfig {
     let stake_pool = borrow_global_mut<StakePool>(pool_address);
     let validator_config = borrow_global<ValidatorConfig>(pool_address);
     let cur_validator_perf = vector::borrow(&validator_perf.validators, validator_config.validator_index);
@@ -4111,15 +4154,6 @@ This function shouldn't abort.
     // Pending active stake can now be active.
     coin::merge(&mut stake_pool.active, coin::extract_all(&mut stake_pool.pending_active));
 
-    // Additionally, distribute transaction fees.
-    if (features::collect_and_distribute_gas_fees()) {
-        let fees_table = &mut borrow_global_mut<ValidatorFees>(@aptos_framework).fees_table;
-        if (table::contains(fees_table, pool_address)) {
-            let coin = table::remove(fees_table, pool_address);
-            coin::merge(&mut stake_pool.active, coin);
-        };
-    };
-
     // Pending inactive stake is only fully unlocked and moved into inactive if the current lockup cycle has expired
     let current_lockup_expiration = stake_pool.locked_until_secs;
     if (get_reconfig_start_time_secs() >= current_lockup_expiration) {
@@ -4131,14 +4165,15 @@ This function shouldn't abort.
 
     if (std::features::module_event_migration_enabled()) {
         event::emit(DistributeRewards { pool_address, rewards_amount });
+    } else {
+        event::emit_event(
+            &mut stake_pool.distribute_rewards_events,
+            DistributeRewardsEvent {
+                pool_address,
+                rewards_amount,
+            },
+        );
     };
-    event::emit_event(
-        &mut stake_pool.distribute_rewards_events,
-        DistributeRewardsEvent {
-            pool_address,
-            rewards_amount,
-        },
-    );
 }
 
@@ -4621,6 +4656,7 @@ Returns validator's next epoch voting power, including pending_active, active, a
pragma verify = true;
+pragma aborts_if_is_partial;
 invariant [suspendable] exists<ValidatorSet>(@aptos_framework) ==> validator_set_is_valid();
 invariant [suspendable] chain_status::is_operating() ==> exists<AptosCoinCapabilities>(@aptos_framework);
 invariant [suspendable] chain_status::is_operating() ==> exists<ValidatorPerformance>(@aptos_framework);
@@ -4640,6 +4676,115 @@ Returns validator's next epoch voting power, including pending_active, active, a
 
 
 
+
+
+
+
+
fun spec_rewards_amount(
+   stake_amount: u64,
+   num_successful_proposals: u64,
+   num_total_proposals: u64,
+   rewards_rate: u64,
+   rewards_rate_denominator: u64,
+): u64;
+
+
fun spec_contains(validators: vector<ValidatorInfo>, addr: address): bool {
+   exists i in 0..len(validators): validators[i].addr == addr
+}
+
+
fun spec_is_current_epoch_validator(pool_address: address): bool {
+   let validator_set = global<ValidatorSet>(@aptos_framework);
+   !spec_contains(validator_set.pending_active, pool_address)
+       && (spec_contains(validator_set.active_validators, pool_address)
+       || spec_contains(validator_set.pending_inactive, pool_address))
+}
+
+
schema ResourceRequirement {
+    requires exists<AptosCoinCapabilities>(@aptos_framework);
+    requires exists<ValidatorPerformance>(@aptos_framework);
+    requires exists<ValidatorSet>(@aptos_framework);
+    requires exists<StakingConfig>(@aptos_framework);
+    requires exists<StakingRewardsConfig>(@aptos_framework) || !features::spec_periodical_reward_rate_decrease_enabled();
+    requires exists<timestamp::CurrentTimeMicroseconds>(@aptos_framework);
+}
+
+
fun spec_get_reward_rate_1(config: StakingConfig): num {
+   if (features::spec_periodical_reward_rate_decrease_enabled()) {
+       let epoch_rewards_rate = global<staking_config::StakingRewardsConfig>(@aptos_framework).rewards_rate;
+       if (epoch_rewards_rate.value == 0) {
+           0
+       } else {
+           let denominator_0 = aptos_std::fixed_point64::spec_divide_u128(staking_config::MAX_REWARDS_RATE, epoch_rewards_rate);
+           let denominator = if (denominator_0 > MAX_U64) {
+               MAX_U64
+           } else {
+               denominator_0
+           };
+           let nominator = aptos_std::fixed_point64::spec_multiply_u128(denominator, epoch_rewards_rate);
+           nominator
+       }
+   } else {
+           config.rewards_rate
+   }
+}
+
+
fun spec_get_reward_rate_2(config: StakingConfig): num {
+   if (features::spec_periodical_reward_rate_decrease_enabled()) {
+       let epoch_rewards_rate = global<staking_config::StakingRewardsConfig>(@aptos_framework).rewards_rate;
+       if (epoch_rewards_rate.value == 0) {
+           1
+       } else {
+           let denominator_0 = aptos_std::fixed_point64::spec_divide_u128(staking_config::MAX_REWARDS_RATE, epoch_rewards_rate);
+           let denominator = if (denominator_0 > MAX_U64) {
+               MAX_U64
+           } else {
+               denominator_0
+           };
+           denominator
+       }
+   } else {
+           config.rewards_rate_denominator
+   }
+}
+
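As a reading aid (editorial note, not part of the generated spec): these two helpers express the periodical FixedPoint64 rate r as an integer fraction, roughly

denominator = min(MAX_REWARDS_RATE / r, MAX_U64) and nominator = denominator * r,

so spec_get_reward_rate_1 / spec_get_reward_rate_2 ≈ r, with the pair falling back to 0 / 1 when the configured epoch rate is zero, and to config.rewards_rate / config.rewards_rate_denominator when the periodical rate decrease feature is disabled.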
+ + + ### Resource `ValidatorSet` @@ -4781,83 +4926,38 @@ Returns validator's next epoch voting power, including pending_active, active, a - + -### Function `initialize_validator_fees` +### Function `get_validator_state` -
public(friend) fun initialize_validator_fees(aptos_framework: &signer)
+
#[view]
+public fun get_validator_state(pool_address: address): u64
 
-
let aptos_addr = signer::address_of(aptos_framework);
-aborts_if !system_addresses::is_aptos_framework_address(aptos_addr);
-aborts_if exists<ValidatorFees>(aptos_addr);
-ensures exists<ValidatorFees>(aptos_addr);
+
aborts_if !exists<ValidatorSet>(@aptos_framework);
+let validator_set = global<ValidatorSet>(@aptos_framework);
+ensures result == VALIDATOR_STATUS_PENDING_ACTIVE ==> spec_contains(validator_set.pending_active, pool_address);
+ensures result == VALIDATOR_STATUS_ACTIVE ==> spec_contains(validator_set.active_validators, pool_address);
+ensures result == VALIDATOR_STATUS_PENDING_INACTIVE ==> spec_contains(validator_set.pending_inactive, pool_address);
+ensures result == VALIDATOR_STATUS_INACTIVE ==> (
+    !spec_contains(validator_set.pending_active, pool_address)
+        && !spec_contains(validator_set.active_validators, pool_address)
+        && !spec_contains(validator_set.pending_inactive, pool_address)
+);
 
- + -### Function `add_transaction_fee` +### Function `initialize` -
public(friend) fun add_transaction_fee(validator_addr: address, fee: coin::Coin<aptos_coin::AptosCoin>)
-
- - - - -
aborts_if !exists<ValidatorFees>(@aptos_framework);
-let fees_table = global<ValidatorFees>(@aptos_framework).fees_table;
-let post post_fees_table = global<ValidatorFees>(@aptos_framework).fees_table;
-let collected_fee = table::spec_get(fees_table, validator_addr);
-let post post_collected_fee = table::spec_get(post_fees_table, validator_addr);
-ensures if (table::spec_contains(fees_table, validator_addr)) {
-    post_collected_fee.value == collected_fee.value + fee.value
-} else {
-    table::spec_contains(post_fees_table, validator_addr) &&
-    table::spec_get(post_fees_table, validator_addr) == fee
-};
-
-
-
-### Function `get_validator_state`
-
-
#[view]
-public fun get_validator_state(pool_address: address): u64
-
- - - - -
aborts_if !exists<ValidatorSet>(@aptos_framework);
-let validator_set = global<ValidatorSet>(@aptos_framework);
-ensures result == VALIDATOR_STATUS_PENDING_ACTIVE ==> spec_contains(validator_set.pending_active, pool_address);
-ensures result == VALIDATOR_STATUS_ACTIVE ==> spec_contains(validator_set.active_validators, pool_address);
-ensures result == VALIDATOR_STATUS_PENDING_INACTIVE ==> spec_contains(validator_set.pending_inactive, pool_address);
-ensures result == VALIDATOR_STATUS_INACTIVE ==> (
-    !spec_contains(validator_set.pending_active, pool_address)
-        && !spec_contains(validator_set.active_validators, pool_address)
-        && !spec_contains(validator_set.pending_inactive, pool_address)
-);
-
-
-
-### Function `initialize`
-
-
public(friend) fun initialize(aptos_framework: &signer)
+
public(friend) fun initialize(aptos_framework: &signer)
 
@@ -4912,6 +5012,11 @@ Returns validator's next epoch voting power, including pending_active, active, a
pragma verify_duration_estimate = 120;
+pragma verify = false;
+pragma aborts_if_is_partial;
+include AbortsIfSignerPermissionStake {
+    s: owner
+};
 include ResourceRequirement;
 let addr = signer::address_of(owner);
 ensures global<ValidatorConfig>(addr) == ValidatorConfig {
@@ -4943,7 +5048,11 @@ Returns validator's next epoch voting power, including pending_active, active, a
 
 
 
-
let pubkey_from_pop = bls12381::spec_public_key_from_bytes_with_pop(
+
pragma verify = false;
+include AbortsIfSignerPermissionStake {
+    s: account
+};
+let pubkey_from_pop = bls12381::spec_public_key_from_bytes_with_pop(
     consensus_pubkey,
     proof_of_possession_from_bytes(proof_of_possession)
 );
@@ -4982,6 +5091,9 @@ Returns validator's next epoch voting power, including pending_active, active, a
 
 
 
pragma verify_duration_estimate = 300;
+include AbortsIfSignerPermissionStake {
+    s: owner
+};
 let owner_address = signer::address_of(owner);
 aborts_if !exists<OwnerCapability>(owner_address);
 ensures !exists<OwnerCapability>(owner_address);
@@ -5000,7 +5112,10 @@ Returns validator's next epoch voting power, including pending_active, active, a
 
 
 
-
let owner_address = signer::address_of(owner);
+
include AbortsIfSignerPermissionStake {
+    s: owner
+};
+let owner_address = signer::address_of(owner);
 aborts_if exists<OwnerCapability>(owner_address);
 ensures exists<OwnerCapability>(owner_address);
 ensures global<OwnerCapability>(owner_address) == owner_cap;
@@ -5060,8 +5175,11 @@ Returns validator's next epoch voting power, including pending_active, active, a
 
 
 
-
pragma verify_duration_estimate = 120;
+
pragma verify = false;
 pragma aborts_if_is_partial;
+include AbortsIfSignerPermissionStake {
+    s: owner
+};
 aborts_if reconfiguration_state::spec_is_in_progress();
 include ResourceRequirement;
 include AddStakeAbortsIfAndEnsures;
@@ -5081,7 +5199,7 @@ Returns validator's next epoch voting power, including pending_active, active, a
 
 
 
pragma disable_invariants_in_body;
-pragma verify_duration_estimate = 300;
+pragma verify = false;
 include ResourceRequirement;
 let amount = coins.value;
 aborts_if reconfiguration_state::spec_is_in_progress();
@@ -5126,7 +5244,10 @@ Returns validator's next epoch voting power, including pending_active, active, a
 
 
 
-
let pre_stake_pool = global<StakePool>(pool_address);
+
include AbortsIfSignerPermissionStake {
+    s: operator
+};
+let pre_stake_pool = global<StakePool>(pool_address);
 let post validator_info = global<ValidatorConfig>(pool_address);
 aborts_if reconfiguration_state::spec_is_in_progress();
 aborts_if !exists<StakePool>(pool_address);
@@ -5155,7 +5276,10 @@ Returns validator's next epoch voting power, including pending_active, active, a
 
 
 
-
let pre_stake_pool = global<StakePool>(pool_address);
+
include AbortsIfSignerPermissionStake {
+    s: operator
+};
+let pre_stake_pool = global<StakePool>(pool_address);
 let post validator_info = global<ValidatorConfig>(pool_address);
 modifies global<ValidatorConfig>(pool_address);
 include StakedValueNochange;
@@ -5209,7 +5333,11 @@ Returns validator's next epoch voting power, including pending_active, active, a
 
 
 
-
pragma disable_invariants_in_body;
+
pragma verify_duration_estimate = 60;
+pragma disable_invariants_in_body;
+include AbortsIfSignerPermissionStake {
+    s: operator
+};
 aborts_if !staking_config::get_allow_validator_set_change(staking_config::get());
 aborts_if !exists<StakePool>(pool_address);
 aborts_if !exists<ValidatorConfig>(pool_address);
@@ -5287,6 +5415,9 @@ Returns validator's next epoch voting power, including pending_active, active, a
 
 
 
pragma verify = false;
+include AbortsIfSignerPermissionStake {
+    s: owner
+};
 aborts_if reconfiguration_state::spec_is_in_progress();
 let addr = signer::address_of(owner);
 let ownership_cap = global<OwnerCapability>(addr);
@@ -5333,6 +5464,9 @@ Returns validator's next epoch voting power, including pending_active, active, a
 
 
pragma disable_invariants_in_body;
 requires chain_status::is_operating();
+include AbortsIfSignerPermissionStake {
+    s: operator
+};
 aborts_if reconfiguration_state::spec_is_in_progress();
 let config = staking_config::get();
 aborts_if !staking_config::get_allow_validator_set_change(config);
@@ -5469,301 +5603,6 @@ Returns validator's next epoch voting power, including pending_active, active, a
 
 
 
-
-
-
-
-
schema AddStakeWithCapAbortsIfAndEnsures {
-    owner_cap: OwnerCapability;
-    amount: u64;
-    let pool_address = owner_cap.pool_address;
-    aborts_if !exists<StakePool>(pool_address);
-    let config = global<staking_config::StakingConfig>(@aptos_framework);
-    let validator_set = global<ValidatorSet>(@aptos_framework);
-    let voting_power_increase_limit = config.voting_power_increase_limit;
-    let post post_validator_set = global<ValidatorSet>(@aptos_framework);
-    let update_voting_power_increase = amount != 0 && (spec_contains(validator_set.active_validators, pool_address)
-                                                       || spec_contains(validator_set.pending_active, pool_address));
-    aborts_if update_voting_power_increase && validator_set.total_joining_power + amount > MAX_U128;
-    ensures update_voting_power_increase ==> post_validator_set.total_joining_power == validator_set.total_joining_power + amount;
-    aborts_if update_voting_power_increase && validator_set.total_voting_power > 0
-            && validator_set.total_voting_power * voting_power_increase_limit > MAX_U128;
-    aborts_if update_voting_power_increase && validator_set.total_voting_power > 0
-            && validator_set.total_joining_power + amount > validator_set.total_voting_power * voting_power_increase_limit / 100;
-    let stake_pool = global<StakePool>(pool_address);
-    let post post_stake_pool = global<StakePool>(pool_address);
-    let value_pending_active = stake_pool.pending_active.value;
-    let value_active = stake_pool.active.value;
-    ensures amount != 0 && spec_is_current_epoch_validator(pool_address) ==> post_stake_pool.pending_active.value == value_pending_active + amount;
-    ensures amount != 0 && !spec_is_current_epoch_validator(pool_address) ==> post_stake_pool.active.value == value_active + amount;
-    let maximum_stake = config.maximum_stake;
-    let value_pending_inactive = stake_pool.pending_inactive.value;
-    let next_epoch_voting_power = value_pending_active + value_active + value_pending_inactive;
-    let voting_power = next_epoch_voting_power + amount;
-    aborts_if amount != 0 && voting_power > MAX_U64;
-    aborts_if amount != 0 && voting_power > maximum_stake;
-}
-
- - - - - - - -
schema AddStakeAbortsIfAndEnsures {
-    owner: signer;
-    amount: u64;
-    let owner_address = signer::address_of(owner);
-    aborts_if !exists<OwnerCapability>(owner_address);
-    let owner_cap = global<OwnerCapability>(owner_address);
-    include AddStakeWithCapAbortsIfAndEnsures { owner_cap };
-}
-
- - - - - - - -
fun spec_is_allowed(account: address): bool {
-   if (!exists<AllowedValidators>(@aptos_framework)) {
-       true
-   } else {
-       let allowed = global<AllowedValidators>(@aptos_framework);
-       contains(allowed.accounts, account)
-   }
-}
-
- - - - - - - -
fun spec_find_validator(v: vector<ValidatorInfo>, addr: address): Option<u64>;
-
- - - - - - - -
fun spec_validators_are_initialized(validators: vector<ValidatorInfo>): bool {
-   forall i in 0..len(validators):
-       spec_has_stake_pool(validators[i].addr) &&
-           spec_has_validator_config(validators[i].addr)
-}
-
- - - - - - - -
fun spec_validators_are_initialized_addrs(addrs: vector<address>): bool {
-   forall i in 0..len(addrs):
-       spec_has_stake_pool(addrs[i]) &&
-           spec_has_validator_config(addrs[i])
-}
-
- - - - - - - -
fun spec_validator_indices_are_valid(validators: vector<ValidatorInfo>): bool {
-   spec_validator_indices_are_valid_addr(validators, spec_validator_index_upper_bound()) &&
-       spec_validator_indices_are_valid_config(validators, spec_validator_index_upper_bound())
-}
-
- - - - - - - -
fun spec_validator_indices_are_valid_addr(validators: vector<ValidatorInfo>, upper_bound: u64): bool {
-   forall i in 0..len(validators):
-       global<ValidatorConfig>(validators[i].addr).validator_index < upper_bound
-}
-
- - - - - - - -
fun spec_validator_indices_are_valid_config(validators: vector<ValidatorInfo>, upper_bound: u64): bool {
-   forall i in 0..len(validators):
-       validators[i].config.validator_index < upper_bound
-}
-
- - - - - - - -
fun spec_validator_indices_active_pending_inactive(validator_set: ValidatorSet): bool {
-   len(validator_set.pending_inactive) + len(validator_set.active_validators) == spec_validator_index_upper_bound()
-}
-
- - - - - - - -
fun spec_validator_index_upper_bound(): u64 {
-   len(global<ValidatorPerformance>(@aptos_framework).validators)
-}
-
- - - - - - - -
fun spec_has_stake_pool(a: address): bool {
-   exists<StakePool>(a)
-}
-
- - - - - - - -
fun spec_has_validator_config(a: address): bool {
-   exists<ValidatorConfig>(a)
-}
-
- - - - - - - -
fun spec_rewards_amount(
-   stake_amount: u64,
-   num_successful_proposals: u64,
-   num_total_proposals: u64,
-   rewards_rate: u64,
-   rewards_rate_denominator: u64,
-): u64;
-
- - - - - - - -
fun spec_contains(validators: vector<ValidatorInfo>, addr: address): bool {
-   exists i in 0..len(validators): validators[i].addr == addr
-}
-
- - - - - - - -
fun spec_is_current_epoch_validator(pool_address: address): bool {
-   let validator_set = global<ValidatorSet>(@aptos_framework);
-   !spec_contains(validator_set.pending_active, pool_address)
-       && (spec_contains(validator_set.active_validators, pool_address)
-       || spec_contains(validator_set.pending_inactive, pool_address))
-}
-
- - - - - - - -
schema ResourceRequirement {
-    requires exists<AptosCoinCapabilities>(@aptos_framework);
-    requires exists<ValidatorPerformance>(@aptos_framework);
-    requires exists<ValidatorSet>(@aptos_framework);
-    requires exists<StakingConfig>(@aptos_framework);
-    requires exists<StakingRewardsConfig>(@aptos_framework) || !features::spec_periodical_reward_rate_decrease_enabled();
-    requires exists<timestamp::CurrentTimeMicroseconds>(@aptos_framework);
-    requires exists<ValidatorFees>(@aptos_framework);
-}
-
- - - - - - - -
fun spec_get_reward_rate_1(config: StakingConfig): num {
-   if (features::spec_periodical_reward_rate_decrease_enabled()) {
-       let epoch_rewards_rate = global<staking_config::StakingRewardsConfig>(@aptos_framework).rewards_rate;
-       if (epoch_rewards_rate.value == 0) {
-           0
-       } else {
-           let denominator_0 = aptos_std::fixed_point64::spec_divide_u128(staking_config::MAX_REWARDS_RATE, epoch_rewards_rate);
-           let denominator = if (denominator_0 > MAX_U64) {
-               MAX_U64
-           } else {
-               denominator_0
-           };
-           let nominator = aptos_std::fixed_point64::spec_multiply_u128(denominator, epoch_rewards_rate);
-           nominator
-       }
-   } else {
-           config.rewards_rate
-   }
-}
-
- - - - - - - -
fun spec_get_reward_rate_2(config: StakingConfig): num {
-   if (features::spec_periodical_reward_rate_decrease_enabled()) {
-       let epoch_rewards_rate = global<staking_config::StakingRewardsConfig>(@aptos_framework).rewards_rate;
-       if (epoch_rewards_rate.value == 0) {
-           1
-       } else {
-           let denominator_0 = aptos_std::fixed_point64::spec_divide_u128(staking_config::MAX_REWARDS_RATE, epoch_rewards_rate);
-           let denominator = if (denominator_0 > MAX_U64) {
-               MAX_U64
-           } else {
-               denominator_0
-           };
-           denominator
-       }
-   } else {
-           config.rewards_rate_denominator
-   }
-}
-
- - - ### Function `update_stake_pool` @@ -5800,16 +5639,9 @@ Returns validator's next epoch voting power, including pending_active, active, a let post post_stake_pool = global<StakePool>(pool_address); let post post_active_value = post_stake_pool.active.value; let post post_pending_inactive_value = post_stake_pool.pending_inactive.value; -let fees_table = global<ValidatorFees>(@aptos_framework).fees_table; -let post post_fees_table = global<ValidatorFees>(@aptos_framework).fees_table; let post post_inactive_value = post_stake_pool.inactive.value; ensures post_stake_pool.pending_active.value == 0; -ensures if (features::spec_is_enabled(features::COLLECT_AND_DISTRIBUTE_GAS_FEES) && table::spec_contains(fees_table, pool_address)) { - !table::spec_contains(post_fees_table, pool_address) && - post_active_value == stake_pool.active.value + rewards_amount_1 + stake_pool.pending_active.value + table::spec_get(fees_table, pool_address).value -} else { - post_active_value == stake_pool.active.value + rewards_amount_1 + stake_pool.pending_active.value -}; +ensures post_active_value == stake_pool.active.value + rewards_amount_1 + stake_pool.pending_active.value; ensures if (spec_get_reconfig_start_time_secs() >= stake_pool.locked_until_secs) { post_pending_inactive_value == 0 && post_inactive_value == stake_pool.inactive.value + stake_pool.pending_inactive.value + rewards_amount_2 @@ -5821,6 +5653,19 @@ Returns validator's next epoch voting power, including pending_active, active, a + + + +
schema AbortsIfSignerPermissionStake {
+    s: signer;
+    let perm = StakeManagementPermission {};
+    aborts_if !permissioned_signer::spec_check_permission_exists(s, perm);
+}
+
+ + + + @@ -5831,7 +5676,6 @@ Returns validator's next epoch voting power, including pending_active, active, a aborts_if !exists<ValidatorConfig>(pool_address); aborts_if global<ValidatorConfig>(pool_address).validator_index >= len(validator_perf.validators); let aptos_addr = type_info::type_of<AptosCoin>().account_address; - aborts_if !exists<ValidatorFees>(aptos_addr); let stake_pool = global<StakePool>(pool_address); include DistributeRewardsAbortsIf {stake: stake_pool.active}; include DistributeRewardsAbortsIf {stake: stake_pool.pending_inactive}; @@ -5883,6 +5727,17 @@ Returns validator's next epoch voting power, including pending_active, active, a + + + + +
fun spec_get_lockup_secs(pool_address: address): u64 {
+   global<StakePool>(pool_address).locked_until_secs
+}
+
+ + + ### Function `calculate_rewards_amount` @@ -5896,6 +5751,7 @@ Returns validator's next epoch voting power, including pending_active, active, a
pragma opaque;
 pragma verify_duration_estimate = 300;
+pragma verify = false;
 requires rewards_rate <= MAX_REWARDS_RATE;
 requires rewards_rate_denominator > 0;
 requires rewards_rate <= rewards_rate_denominator;
@@ -5928,7 +5784,8 @@ Returns validator's next epoch voting power, including pending_active, active, a
 
 
 
-
include ResourceRequirement;
+
pragma aborts_if_is_partial;
+include ResourceRequirement;
 requires rewards_rate <= MAX_REWARDS_RATE;
 requires rewards_rate_denominator > 0;
 requires rewards_rate <= rewards_rate_denominator;
diff --git a/aptos-move/framework/aptos-framework/doc/staking_config.md b/aptos-move/framework/aptos-framework/doc/staking_config.md
index 244ed0c23647e..16c8ccb4d0a2b 100644
--- a/aptos-move/framework/aptos-framework/doc/staking_config.md
+++ b/aptos-move/framework/aptos-framework/doc/staking_config.md
@@ -188,22 +188,22 @@ Staking reward configurations that will be stored with the @aptos_framework acco
 
 
 
-
+
 
-Denominator of number in basis points. 1 bps(basis points) = 0.01%.
+The function has been deprecated.
 
 
-
const BPS_DENOMINATOR: u64 = 10000;
+
const EDEPRECATED_FUNCTION: u64 = 10;
 
- + -The function has been deprecated. +Denominator of number in basis points. 1 bps(basis points) = 0.01%. -
const EDEPRECATED_FUNCTION: u64 = 10;
+
const BPS_DENOMINATOR: u64 = 10000;
 
@@ -386,6 +386,17 @@ Only called during genesis. rewards_rate_denominator, voting_power_increase_limit, }); + + // Initialize StakingRewardsConfig with the given rewards_rate and rewards_rate_denominator, + // while setting min_rewards_rate and rewards_rate_decrease_rate to 0. + initialize_rewards( + aptos_framework, + fixed_point64::create_from_rational((rewards_rate as u128), (rewards_rate_denominator as u128)), + fixed_point64::create_from_rational(0, 1000), + ONE_YEAR_IN_SECS, + 0, + fixed_point64::create_from_rational(0, 1000), + ); }
@@ -1072,6 +1083,7 @@ Can only be called as part of the Aptos governance proposal process established
invariant [suspendable] chain_status::is_operating() ==> exists<StakingConfig>(@aptos_framework);
+invariant [suspendable] chain_status::is_operating() ==> exists<StakingRewardsConfig>(@aptos_framework);
 pragma verify = true;
 pragma aborts_if_is_strict;
 
@@ -1223,6 +1235,7 @@ StakingConfig does not exist under the aptos_framework before creating it.
let addr = signer::address_of(aptos_framework);
+requires exists<timestamp::CurrentTimeMicroseconds>(@aptos_framework);
 // This enforces high-level requirement 1:
 aborts_if addr != @aptos_framework;
 aborts_if minimum_stake > maximum_stake || maximum_stake == 0;
@@ -1234,7 +1247,9 @@ StakingConfig does not exist under the aptos_framework before creating it.
 aborts_if rewards_rate > MAX_REWARDS_RATE;
 aborts_if rewards_rate > rewards_rate_denominator;
 aborts_if exists<StakingConfig>(addr);
+aborts_if exists<StakingRewardsConfig>(addr);
 ensures exists<StakingConfig>(addr);
+ensures exists<StakingRewardsConfig>(addr);
 
diff --git a/aptos-move/framework/aptos-framework/doc/staking_contract.md b/aptos-move/framework/aptos-framework/doc/staking_contract.md
index c2d9178ff06fc..534999c039be2 100644
--- a/aptos-move/framework/aptos-framework/doc/staking_contract.md
+++ b/aptos-move/framework/aptos-framework/doc/staking_contract.md
@@ -91,6 +91,7 @@ pool.
 -  [Specification](#@Specification_1)
     -  [High-level Requirements](#high-level-req)
     -  [Module-level Specification](#module-level-spec)
+    -  [Struct `StakingContract`](#@Specification_1_StakingContract)
     -  [Function `stake_pool_address`](#@Specification_1_stake_pool_address)
     -  [Function `last_recorded_principal`](#@Specification_1_last_recorded_principal)
     -  [Function `commission_percentage`](#@Specification_1_commission_percentage)
@@ -98,6 +99,7 @@ pool.
     -  [Function `pending_distribution_counts`](#@Specification_1_pending_distribution_counts)
     -  [Function `staking_contract_exists`](#@Specification_1_staking_contract_exists)
     -  [Function `beneficiary_for_operator`](#@Specification_1_beneficiary_for_operator)
+    -  [Function `get_expected_stake_pool_address`](#@Specification_1_get_expected_stake_pool_address)
     -  [Function `create_staking_contract`](#@Specification_1_create_staking_contract)
     -  [Function `create_staking_contract_with_coins`](#@Specification_1_create_staking_contract_with_coins)
     -  [Function `add_stake`](#@Specification_1_add_stake)
@@ -1279,7 +1281,7 @@ Commission percentage has to be between 0 and 100.
 
 
 
-Chaning beneficiaries for operators is not supported.
+Changing beneficiaries for operators is not supported.
 
 
const EOPERATOR_BENEFICIARY_CHANGE_NOT_SUPPORTED: u64 = 9;
@@ -1733,11 +1735,12 @@ Staker can call this function to create a simple staking contract with a specifi
 
     if (std::features::module_event_migration_enabled()) {
         emit(CreateStakingContract { operator, voter, pool_address, principal, commission_percentage });
+    } else {
+        emit_event(
+            &mut store.create_staking_contract_events,
+            CreateStakingContractEvent { operator, voter, pool_address, principal, commission_percentage },
+        );
     };
-    emit_event(
-        &mut store.create_staking_contract_events,
-        CreateStakingContractEvent { operator, voter, pool_address, principal, commission_percentage },
-    );
     pool_address
 }
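The hunks that follow all apply the same event-migration change: when the `module_event_migration_enabled` feature is on, only the module event is emitted; otherwise the legacy handle-based event is emitted, instead of always emitting both as before. A generic sketch of the resulting shape (hypothetical `Foo`/`FooEvent` types and `store.foo_events` handle, not taken from this diff):

```move
// Sketch of the recurring pattern; Foo, FooEvent and store.foo_events are placeholders.
if (std::features::module_event_migration_enabled()) {
    event::emit(Foo { operator, pool_address });
} else {
    event::emit_event(
        &mut store.foo_events,
        FooEvent { operator, pool_address },
    );
};
```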
 
@@ -1777,11 +1780,12 @@ Add more stake to an existing staking contract.
     let pool_address = staking_contract.pool_address;
     if (std::features::module_event_migration_enabled()) {
         emit(AddStake { operator, pool_address, amount });
+    } else {
+        emit_event(
+            &mut store.add_stake_events,
+            AddStakeEvent { operator, pool_address, amount },
+        );
     };
-    emit_event(
-        &mut store.add_stake_events,
-        AddStakeEvent { operator, pool_address, amount },
-    );
 }
@@ -1817,12 +1821,12 @@ Convenient function to allow the staker to update the voter address in a staking
     if (std::features::module_event_migration_enabled()) {
         emit(UpdateVoter { operator, pool_address, old_voter, new_voter });
+    } else {
+        emit_event(
+            &mut store.update_voter_events,
+            UpdateVoterEvent { operator, pool_address, old_voter, new_voter },
+        );
     };
-    emit_event(
-        &mut store.update_voter_events,
-        UpdateVoterEvent { operator, pool_address, old_voter, new_voter },
-    );
-
 }
@@ -1857,8 +1861,9 @@ Convenient function to allow the staker to reset their stake pool's lockup perio
     if (std::features::module_event_migration_enabled()) {
         emit(ResetLockup { operator, pool_address });
+    } else {
+        emit_event(&mut store.reset_lockup_events, ResetLockupEvent { operator, pool_address });
     };
-    emit_event(&mut store.reset_lockup_events, ResetLockupEvent { operator, pool_address });
 }
@@ -1921,11 +1926,12 @@ TODO: fix the typo in function name. commision -> commission
         emit(
             UpdateCommission { staker: staker_address, operator, old_commission_percentage, new_commission_percentage }
         );
+    } else {
+        emit_event(
+            &mut borrow_global_mut<StakingGroupUpdateCommissionEvent>(staker_address).update_commission_events,
+            UpdateCommissionEvent { staker: staker_address, operator, old_commission_percentage, new_commission_percentage }
+        );
     };
-    emit_event(
-        &mut borrow_global_mut<StakingGroupUpdateCommissionEvent>(staker_address).update_commission_events,
-        UpdateCommissionEvent { staker: staker_address, operator, old_commission_percentage, new_commission_percentage }
-    );
 }
@@ -2028,11 +2034,12 @@ Only staker, operator or beneficiary can call this.
     let pool_address = staking_contract.pool_address;
     if (std::features::module_event_migration_enabled()) {
         emit(RequestCommission { operator, pool_address, accumulated_rewards, commission_amount });
+    } else {
+        emit_event(
+            request_commission_events,
+            RequestCommissionEvent { operator, pool_address, accumulated_rewards, commission_amount },
+        );
     };
-    emit_event(
-        request_commission_events,
-        RequestCommissionEvent { operator, pool_address, accumulated_rewards, commission_amount },
-    );
     commission_amount
 }
@@ -2103,11 +2110,12 @@ This also triggers paying commission to the operator for accounting simplicity.
     let pool_address = staking_contract.pool_address;
     if (std::features::module_event_migration_enabled()) {
         emit(UnlockStake { pool_address, operator, amount, commission_paid });
+    } else {
+        emit_event(
+            &mut store.unlock_stake_events,
+            UnlockStakeEvent { pool_address, operator, amount, commission_paid },
+        );
     };
-    emit_event(
-        &mut store.unlock_stake_events,
-        UnlockStakeEvent { pool_address, operator, amount, commission_paid },
-    );
 }
@@ -2233,11 +2241,12 @@ Allows staker to switch operator without going through the lenghthy process to u
     simple_map::add(staking_contracts, new_operator, staking_contract);
     if (std::features::module_event_migration_enabled()) {
         emit(SwitchOperator { pool_address, old_operator, new_operator });
+    } else {
+        emit_event(
+            &mut store.switch_operator_events,
+            SwitchOperatorEvent { pool_address, old_operator, new_operator }
+        );
     };
-    emit_event(
-        &mut store.switch_operator_events,
-        SwitchOperatorEvent { pool_address, old_operator, new_operator }
-    );
 }
@@ -2360,7 +2369,7 @@ Distribute all unlocked (inactive) funds according to distribution shares.
     // Buy all recipients out of the distribution pool.
     while (pool_u64::shareholders_count(distribution_pool) > 0) {
         let recipients = pool_u64::shareholders(distribution_pool);
-        let recipient = *vector::borrow(&mut recipients, 0);
+        let recipient = *vector::borrow(&recipients, 0);
         let current_shares = pool_u64::shares(distribution_pool, recipient);
         let amount_to_distribute = pool_u64::redeem_shares(distribution_pool, recipient, current_shares);
         // If the recipient is the operator, send the commission to the beneficiary instead.
@@ -2371,11 +2380,12 @@ Distribute all unlocked (inactive) funds according to distribution shares.
         if (std::features::module_event_migration_enabled()) {
             emit(Distribute { operator, pool_address, recipient, amount: amount_to_distribute });
+        } else {
+            emit_event(
+                distribute_events,
+                DistributeEvent { operator, pool_address, recipient, amount: amount_to_distribute }
+            );
         };
-        emit_event(
-            distribute_events,
-            DistributeEvent { operator, pool_address, recipient, amount: amount_to_distribute }
-        );
     };
 
     // In case there's any dust left, send them all to the staker.
@@ -2410,7 +2420,7 @@ Assert that a staking_contract exists for the staker/operator pair.
fun assert_staking_contract_exists(staker: address, operator: address) acquires Store {
     assert!(exists<Store>(staker), error::not_found(ENO_STAKING_CONTRACT_FOUND_FOR_STAKER));
-    let staking_contracts = &mut borrow_global_mut<Store>(staker).staking_contracts;
+    let staking_contracts = &borrow_global<Store>(staker).staking_contracts;
     assert!(
         simple_map::contains_key(staking_contracts, &operator),
         error::not_found(ENO_STAKING_CONTRACT_FOUND_FOR_OPERATOR),
@@ -2454,11 +2464,12 @@ Add a new distribution for recipient and amount to the
     let pool_address = staking_contract.pool_address;
     if (std::features::module_event_migration_enabled()) {
         emit(AddDistribution { operator, pool_address, amount: coins_amount });
+    } else {
+        emit_event(
+            add_distribution_events,
+            AddDistributionEvent { operator, pool_address, amount: coins_amount }
+        );
     };
-    emit_event(
-        add_distribution_events,
-        AddDistributionEvent { operator, pool_address, amount: coins_amount }
-    );
 }
 
@@ -2763,6 +2774,62 @@ Create a new staking_contracts resource.
 
 
 
+
+
+### Struct `StakingContract`
+
+
struct StakingContract has store
+
+ + + +
+
+principal: u64 +
+
+ +
+
+pool_address: address +
+
+ +
+
+owner_cap: stake::OwnerCapability +
+
+ +
+
+commission_percentage: u64 +
+
+ +
+
+distribution_pool: pool_u64::Pool +
+
+ +
+
+signer_cap: account::SignerCapability +
+
+ +
+
+ + + +
invariant commission_percentage >= 0 && commission_percentage <= 100;
+
+
+
+
 
 ### Function `stake_pool_address`
 
 
@@ -2836,7 +2903,6 @@ Staking_contract exists the stacker/operator pair.
pragma verify_duration_estimate = 120;
-requires staking_contract.commission_percentage >= 0 && staking_contract.commission_percentage <= 100;
 let staking_contracts = global<Store>(staker).staking_contracts;
 let staking_contract = simple_map::spec_get(staking_contracts, operator);
 include ContractExistsAbortsIf;
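The struct-level invariant added above is what makes the removed `requires staking_contract.commission_percentage >= 0 && staking_contract.commission_percentage <= 100` clauses in the surrounding hunks redundant: once declared on the struct, the prover assumes the bound for every stored `StakingContract`. A minimal sketch of the declaration (same invariant as shown above):

```move
spec StakingContract {
    // Every StakingContract stored on chain satisfies this bound.
    invariant commission_percentage >= 0 && commission_percentage <= 100;
}
```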
@@ -2893,35 +2959,52 @@ Staking_contract exists the stacker/operator pair.
 
 
 
+
 
-
+### Function `beneficiary_for_operator`
 
 
-
fun spec_staking_contract_exists(staker: address, operator: address): bool {
-   if (!exists<Store>(staker)) {
-       false
-   } else {
-       let store = global<Store>(staker);
-       simple_map::spec_contains_key(store.staking_contracts, operator)
-   }
-}
+
#[view]
+public fun beneficiary_for_operator(operator: address): address
 
- -### Function `beneficiary_for_operator` +
pragma verify = false;
+
+ + + + + +### Function `get_expected_stake_pool_address`
#[view]
-public fun beneficiary_for_operator(operator: address): address
+public fun get_expected_stake_pool_address(staker: address, operator: address, contract_creation_seed: vector<u8>): address
 
-
pragma verify = false;
+
pragma aborts_if_is_partial;
+
+ + + + + + + +
fun spec_staking_contract_exists(staker: address, operator: address): bool {
+   if (!exists<Store>(staker)) {
+       false
+   } else {
+       let store = global<Store>(staker);
+       simple_map::spec_contains_key(store.staking_contracts, operator)
+   }
+}
 
@@ -3023,6 +3106,7 @@ Staking_contract exists the stacker/operator pair. let post staking_contract = simple_map::spec_get(store.staking_contracts, operator); let post pool_address = staking_contract.owner_cap.pool_address; let post new_delegated_voter = global<stake::StakePool>(pool_address).delegated_voter; +// This enforces high-level requirement 4: ensures new_delegated_voter == new_voter;
@@ -3117,7 +3201,6 @@ Only staker or operator can call this.
pragma verify = false;
-requires amount > 0;
 let staker_address = signer::address_of(staker);
 include ContractExistsAbortsIf { staker: staker_address };
 
@@ -3137,8 +3220,6 @@ Staking_contract exists the stacker/operator pair.
pragma verify = false;
-// This enforces high-level requirement 4:
-requires staking_contract.commission_percentage >= 0 && staking_contract.commission_percentage <= 100;
 let staker_address = signer::address_of(staker);
 let staking_contracts = global<Store>(staker_address).staking_contracts;
 let staking_contract = simple_map::spec_get(staking_contracts, operator);
@@ -3372,10 +3453,11 @@ The StakePool exists under the pool_address of StakingContract.
 
 
 The Account exists under the staker.
-The guid_creation_num of the ccount resource is up to MAX_U64.
+The guid_creation_num of the account resource is up to MAX_U64.
 
 
-
include NewStakingContractsHolderAbortsIf;
+
pragma aborts_if_is_partial;
+include NewStakingContractsHolderAbortsIf;
 
@@ -3387,10 +3469,6 @@ The guid_creation_num of the ccount resource is up to MAX_U64.
schema NewStakingContractsHolderAbortsIf {
     staker: signer;
     let addr = signer::address_of(staker);
-    let account = global<account::Account>(addr);
-    aborts_if !exists<account::Account>(addr);
-    aborts_if account.guid_creation_num + 9 >= account::MAX_GUID_CREATION_NUM;
-    aborts_if account.guid_creation_num + 9 > MAX_U64;
 }
 
@@ -3543,7 +3621,6 @@ a staking_contract exists for the staker/operator pair. requires exists<staking_config::StakingRewardsConfig>( @aptos_framework ) || !std::features::spec_periodical_reward_rate_decrease_enabled(); - requires exists<stake::ValidatorFees>(@aptos_framework); requires exists<aptos_framework::timestamp::CurrentTimeMicroseconds>(@aptos_framework); requires exists<stake::AptosCoinCapabilities>(@aptos_framework); } diff --git a/aptos-move/framework/aptos-framework/doc/staking_proxy.md b/aptos-move/framework/aptos-framework/doc/staking_proxy.md index c05adb7f26803..2a2f4577aa04d 100644 --- a/aptos-move/framework/aptos-framework/doc/staking_proxy.md +++ b/aptos-move/framework/aptos-framework/doc/staking_proxy.md @@ -5,6 +5,10 @@ +- [Struct `StakeProxyPermission`](#0x1_staking_proxy_StakeProxyPermission) +- [Constants](#@Constants_0) +- [Function `check_stake_proxy_permission`](#0x1_staking_proxy_check_stake_proxy_permission) +- [Function `grant_permission`](#0x1_staking_proxy_grant_permission) - [Function `set_operator`](#0x1_staking_proxy_set_operator) - [Function `set_voter`](#0x1_staking_proxy_set_voter) - [Function `set_vesting_contract_operator`](#0x1_staking_proxy_set_vesting_contract_operator) @@ -13,20 +17,23 @@ - [Function `set_vesting_contract_voter`](#0x1_staking_proxy_set_vesting_contract_voter) - [Function `set_staking_contract_voter`](#0x1_staking_proxy_set_staking_contract_voter) - [Function `set_stake_pool_voter`](#0x1_staking_proxy_set_stake_pool_voter) -- [Specification](#@Specification_0) +- [Specification](#@Specification_1) - [High-level Requirements](#high-level-req) - [Module-level Specification](#module-level-spec) - - [Function `set_operator`](#@Specification_0_set_operator) - - [Function `set_voter`](#@Specification_0_set_voter) - - [Function `set_vesting_contract_operator`](#@Specification_0_set_vesting_contract_operator) - - [Function `set_staking_contract_operator`](#@Specification_0_set_staking_contract_operator) - - [Function `set_stake_pool_operator`](#@Specification_0_set_stake_pool_operator) - - [Function `set_vesting_contract_voter`](#@Specification_0_set_vesting_contract_voter) - - [Function `set_staking_contract_voter`](#@Specification_0_set_staking_contract_voter) - - [Function `set_stake_pool_voter`](#@Specification_0_set_stake_pool_voter) - - -
use 0x1::signer;
+    -  [Function `grant_permission`](#@Specification_1_grant_permission)
+    -  [Function `set_operator`](#@Specification_1_set_operator)
+    -  [Function `set_voter`](#@Specification_1_set_voter)
+    -  [Function `set_vesting_contract_operator`](#@Specification_1_set_vesting_contract_operator)
+    -  [Function `set_staking_contract_operator`](#@Specification_1_set_staking_contract_operator)
+    -  [Function `set_stake_pool_operator`](#@Specification_1_set_stake_pool_operator)
+    -  [Function `set_vesting_contract_voter`](#@Specification_1_set_vesting_contract_voter)
+    -  [Function `set_staking_contract_voter`](#@Specification_1_set_staking_contract_voter)
+    -  [Function `set_stake_pool_voter`](#@Specification_1_set_stake_pool_voter)
+
+
+
use 0x1::error;
+use 0x1::permissioned_signer;
+use 0x1::signer;
 use 0x1::stake;
 use 0x1::staking_contract;
 use 0x1::vesting;
@@ -34,6 +41,101 @@
 
 
 
+
+
+## Struct `StakeProxyPermission`
+
+
+
+
struct StakeProxyPermission has copy, drop, store
+
+ + + +
+Fields + + +
+
+dummy_field: bool +
+
+ +
+
+ + +
+ + + +## Constants + + + + +Signer does not have permission to perform stake proxy logic. + + +
const ENO_STAKE_PERMISSION: u64 = 28;
+
+ + + + + +## Function `check_stake_proxy_permission` + +Permissions + + +
fun check_stake_proxy_permission(s: &signer)
+
+ + + +
+Implementation + + +
inline fun check_stake_proxy_permission(s: &signer) {
+    assert!(
+        permissioned_signer::check_permission_exists(s, StakeProxyPermission {}),
+        error::permission_denied(ENO_STAKE_PERMISSION),
+    );
+}
+
+ + + +
+ + + +## Function `grant_permission` + +Grant permission to mutate staking on behalf of the master signer. + + +
public fun grant_permission(master: &signer, permissioned_signer: &signer)
+
+ + + +
+Implementation + + +
public fun grant_permission(master: &signer, permissioned_signer: &signer) {
+    permissioned_signer::authorize_unlimited(master, permissioned_signer, StakeProxyPermission {})
+}
+
+ + + +
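Taken together, `StakeProxyPermission`, `check_stake_proxy_permission` and `grant_permission` gate each staking-proxy entry point behind the permissioned-signer framework. A hypothetical calling sketch (the module address, wrapper function name, and the way the permissioned signer is obtained are assumptions, not part of this diff):

```move
#[test_only]
module 0xcafe::stake_proxy_example {
    use aptos_framework::staking_proxy;

    // `master` is an ordinary signer; `delegate` is a permissioned signer derived
    // from it (creation elided). After the grant, guarded entry functions such as
    // set_stake_pool_operator succeed when called with `delegate`.
    fun delegate_stake_ops(master: &signer, delegate: &signer, new_operator: address) {
        staking_proxy::grant_permission(master, delegate);
        staking_proxy::set_stake_pool_operator(delegate, new_operator);
    }
}
```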
+ ## Function `set_operator` @@ -102,6 +204,7 @@
public entry fun set_vesting_contract_operator(owner: &signer, old_operator: address, new_operator: address) {
+    check_stake_proxy_permission(owner);
     let owner_address = signer::address_of(owner);
     let vesting_contracts = &vesting::vesting_contracts(owner_address);
     vector::for_each_ref(vesting_contracts, |vesting_contract| {
@@ -134,6 +237,7 @@
 
 
 
public entry fun set_staking_contract_operator(owner: &signer, old_operator: address, new_operator: address) {
+    check_stake_proxy_permission(owner);
     let owner_address = signer::address_of(owner);
     if (staking_contract::staking_contract_exists(owner_address, old_operator)) {
         let current_commission_percentage = staking_contract::commission_percentage(owner_address, old_operator);
@@ -162,6 +266,7 @@
 
 
 
public entry fun set_stake_pool_operator(owner: &signer, new_operator: address) {
+    check_stake_proxy_permission(owner);
     let owner_address = signer::address_of(owner);
     if (stake::stake_pool_exists(owner_address)) {
         stake::set_operator(owner, new_operator);
@@ -189,6 +294,7 @@
 
 
 
public entry fun set_vesting_contract_voter(owner: &signer, operator: address, new_voter: address) {
+    check_stake_proxy_permission(owner);
     let owner_address = signer::address_of(owner);
     let vesting_contracts = &vesting::vesting_contracts(owner_address);
     vector::for_each_ref(vesting_contracts, |vesting_contract| {
@@ -220,6 +326,7 @@
 
 
 
public entry fun set_staking_contract_voter(owner: &signer, operator: address, new_voter: address) {
+    check_stake_proxy_permission(owner);
     let owner_address = signer::address_of(owner);
     if (staking_contract::staking_contract_exists(owner_address, operator)) {
         staking_contract::update_voter(owner, operator, new_voter);
@@ -247,6 +354,7 @@
 
 
 
public entry fun set_stake_pool_voter(owner: &signer, new_voter: address) {
+    check_stake_proxy_permission(owner);
     if (stake::stake_pool_exists(signer::address_of(owner))) {
         stake::set_delegated_voter(owner, new_voter);
     };
@@ -257,7 +365,7 @@
 
 
 
-
+
 
 ## Specification
 
@@ -324,12 +432,31 @@
 
 
 
pragma verify = true;
-pragma aborts_if_is_strict;
+pragma aborts_if_is_partial;
 
- + + +### Function `grant_permission` + + +
public fun grant_permission(master: &signer, permissioned_signer: &signer)
+
+ + + + +
pragma aborts_if_is_partial;
+aborts_if !permissioned_signer::spec_is_permissioned_signer(permissioned_signer);
+aborts_if permissioned_signer::spec_is_permissioned_signer(master);
+aborts_if signer::address_of(master) != signer::address_of(permissioned_signer);
+
+ + + + ### Function `set_operator` @@ -349,7 +476,7 @@ Aborts if conditions of SetStakePoolOperator are not met - + ### Function `set_voter` @@ -362,13 +489,14 @@ Aborts if conditions of SetStackingContractVoter and SetStackPoolVoterAbortsIf a
pragma aborts_if_is_partial;
+pragma verify_duration_estimate = 120;
 include SetStakingContractVoter;
 include SetStakePoolVoterAbortsIf;
 
- + ### Function `set_vesting_contract_operator` @@ -384,7 +512,7 @@ Aborts if conditions of SetStackingContractVoter and SetStackPoolVoterAbortsIf a - + ### Function `set_staking_contract_operator` @@ -438,7 +566,7 @@ Aborts if conditions of SetStackingContractVoter and SetStackPoolVoterAbortsIf a - + ### Function `set_stake_pool_operator` @@ -452,6 +580,12 @@ One of them are not exists
include SetStakePoolOperator;
+include AbortsIfSignerPermissionStakeProxy {
+    s: owner
+};
+include exists<stake::StakePool>(signer::address_of(owner)) ==> stake::AbortsIfSignerPermissionStake {
+    s:owner
+};
 
@@ -463,6 +597,9 @@ One of them are not exists
schema SetStakePoolOperator {
     owner: &signer;
     new_operator: address;
+    include AbortsIfSignerPermissionStakeProxy {
+        s: owner
+    };
     let owner_address = signer::address_of(owner);
     let ownership_cap = borrow_global<stake::OwnerCapability>(owner_address);
     let pool_address = ownership_cap.pool_address;
@@ -473,7 +610,7 @@ One of them are not exists
 
 
 
-
+
 
 ### Function `set_vesting_contract_voter`
 
@@ -489,7 +626,7 @@ One of them are not exists
 
 
 
-
+
 
 ### Function `set_staking_contract_voter`
 
@@ -501,6 +638,9 @@ One of them are not exists
 
 
 
include SetStakingContractVoter;
+include AbortsIfSignerPermissionStakeProxy {
+    s: owner
+};
 
@@ -531,7 +671,7 @@ Then abort if the resource is not exist - + ### Function `set_stake_pool_voter` @@ -543,6 +683,12 @@ Then abort if the resource is not exist
include SetStakePoolVoterAbortsIf;
+include AbortsIfSignerPermissionStakeProxy {
+    s: owner
+};
+include exists<stake::StakePool>(signer::address_of(owner)) ==> stake::AbortsIfSignerPermissionStake {
+    s:owner
+};
 
@@ -554,6 +700,9 @@ Then abort if the resource is not exist
schema SetStakePoolVoterAbortsIf {
     owner: &signer;
     new_voter: address;
+    include AbortsIfSignerPermissionStakeProxy {
+        s: owner
+    };
     let owner_address = signer::address_of(owner);
     let ownership_cap = global<stake::OwnerCapability>(owner_address);
     let pool_address = ownership_cap.pool_address;
@@ -563,4 +712,17 @@ Then abort if the resource is not exist
 
+ + + + + +
schema AbortsIfSignerPermissionStakeProxy {
+    s: signer;
+    let perm = StakeProxyPermission {};
+    aborts_if !permissioned_signer::spec_check_permission_exists(s, perm);
+}
+
+ + [move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/aptos-framework/doc/system_addresses.md b/aptos-move/framework/aptos-framework/doc/system_addresses.md index 6f24ca3a14ae2..11206ab5bcfd6 100644 --- a/aptos-move/framework/aptos-framework/doc/system_addresses.md +++ b/aptos-move/framework/aptos-framework/doc/system_addresses.md @@ -31,7 +31,6 @@
use 0x1::error;
-use 0x1::features;
 use 0x1::signer;
 
@@ -146,12 +145,7 @@ The operation can only be performed by the VM
public fun is_core_resource_address(addr: address): bool {
-    // Check if the feature flag for decommissioning core resources is enabled.
-    if (get_decommission_core_resources_enabled()) {
-        false
-    } else {
-        addr == @core_resources
-    }
+    addr == @core_resources
 }
 
@@ -503,20 +497,6 @@ Return true if addr is either the VM address or an Aptos Framework
-Specifies that a function aborts if the account does not have the root address. - - - - - -
schema AbortsIfNotCoreResource {
-    addr: address;
-    // This enforces high-level requirement 1:
-    aborts_if addr != @core_resources with error::PERMISSION_DENIED;
-}
-
- - @@ -566,6 +546,20 @@ Specifies that a function aborts if the account does not have the root address.
+Specifies that a function aborts if the account does not have the aptos framework address. + + + + + +
schema AbortsIfNotAptosFramework {
+    account: signer;
+    // This enforces high-level requirement 2:
+    aborts_if signer::address_of(account) != @aptos_framework with error::PERMISSION_DENIED;
+}
+
+ + @@ -583,4 +577,18 @@ Specifies that a function aborts if the account does not have the root address.
+Specifies that a function aborts if the account does not have the VM reserved address. + + + + + +
schema AbortsIfNotVM {
+    account: signer;
+    // This enforces high-level requirement 3:
+    aborts_if signer::address_of(account) != @vm_reserved with error::PERMISSION_DENIED;
+}
+
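These schemas let other module specs state the signer requirement once instead of repeating the abort condition. A minimal usage sketch (hypothetical function `f`; the shape follows how the framework includes these schemas elsewhere):

```move
spec f(aptos_framework: &signer) {
    // Pulls in: aborts_if signer::address_of(account) != @aptos_framework with error::PERMISSION_DENIED.
    include system_addresses::AbortsIfNotAptosFramework { account: aptos_framework };
}
```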
+ + [move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/aptos-framework/doc/transaction_context.md b/aptos-move/framework/aptos-framework/doc/transaction_context.md index cc7d9010ffc74..d4dbf2d635811 100644 --- a/aptos-move/framework/aptos-framework/doc/transaction_context.md +++ b/aptos-move/framework/aptos-framework/doc/transaction_context.md @@ -1029,6 +1029,7 @@ Returns the inner entry function payload of the multisig payload.
pragma opaque;
+aborts_if [abstract] false;
 ensures [abstract] result == spec_generate_unique_address();
 
@@ -1055,6 +1056,7 @@ Returns the inner entry function payload of the multisig payload.
pragma opaque;
+aborts_if [abstract] false;
 // This enforces high-level requirement 3:
 ensures [abstract] result == spec_generate_unique_address();
 
diff --git a/aptos-move/framework/aptos-framework/doc/transaction_fee.md b/aptos-move/framework/aptos-framework/doc/transaction_fee.md index 2a7542fe4bb4c..a42f4ec46f0c8 100644 --- a/aptos-move/framework/aptos-framework/doc/transaction_fee.md +++ b/aptos-move/framework/aptos-framework/doc/transaction_fee.md @@ -3,49 +3,34 @@ # Module `0x1::transaction_fee` -This module provides an interface to burn or collect and redistribute transaction fees. -- [Resource `CopyCapabilitiesOneShot`](#0x1_transaction_fee_CopyCapabilitiesOneShot) - [Resource `AptosCoinCapabilities`](#0x1_transaction_fee_AptosCoinCapabilities) - [Resource `AptosFABurnCapabilities`](#0x1_transaction_fee_AptosFABurnCapabilities) - [Resource `AptosCoinMintCapability`](#0x1_transaction_fee_AptosCoinMintCapability) -- [Resource `CollectedFeesPerBlock`](#0x1_transaction_fee_CollectedFeesPerBlock) - [Struct `FeeStatement`](#0x1_transaction_fee_FeeStatement) +- [Resource `CollectedFeesPerBlock`](#0x1_transaction_fee_CollectedFeesPerBlock) - [Constants](#@Constants_0) -- [Function `initialize_fee_collection_and_distribution`](#0x1_transaction_fee_initialize_fee_collection_and_distribution) -- [Function `is_fees_collection_enabled`](#0x1_transaction_fee_is_fees_collection_enabled) -- [Function `upgrade_burn_percentage`](#0x1_transaction_fee_upgrade_burn_percentage) -- [Function `register_proposer_for_fee_collection`](#0x1_transaction_fee_register_proposer_for_fee_collection) -- [Function `burn_coin_fraction`](#0x1_transaction_fee_burn_coin_fraction) -- [Function `process_collected_fees`](#0x1_transaction_fee_process_collected_fees) -- [Function `burn_from`](#0x1_transaction_fee_burn_from) - [Function `burn_fee`](#0x1_transaction_fee_burn_fee) - [Function `mint_and_refund`](#0x1_transaction_fee_mint_and_refund) -- [Function `collect_fee`](#0x1_transaction_fee_collect_fee) - [Function `store_aptos_coin_burn_cap`](#0x1_transaction_fee_store_aptos_coin_burn_cap) - [Function `convert_to_aptos_fa_burn_ref`](#0x1_transaction_fee_convert_to_aptos_fa_burn_ref) - [Function `store_aptos_coin_mint_cap`](#0x1_transaction_fee_store_aptos_coin_mint_cap) -- [Function `copy_capabilities_for_bridge`](#0x1_transaction_fee_copy_capabilities_for_bridge) -- [Function `copy_capabilities_for_native_bridge`](#0x1_transaction_fee_copy_capabilities_for_native_bridge) -- [Function `initialize_storage_refund`](#0x1_transaction_fee_initialize_storage_refund) - [Function `emit_fee_statement`](#0x1_transaction_fee_emit_fee_statement) +- [Function `initialize_fee_collection_and_distribution`](#0x1_transaction_fee_initialize_fee_collection_and_distribution) +- [Function `upgrade_burn_percentage`](#0x1_transaction_fee_upgrade_burn_percentage) +- [Function `initialize_storage_refund`](#0x1_transaction_fee_initialize_storage_refund) - [Specification](#@Specification_1) - [High-level Requirements](#high-level-req) - [Module-level Specification](#module-level-spec) - [Resource `CollectedFeesPerBlock`](#@Specification_1_CollectedFeesPerBlock) - - [Function `initialize_fee_collection_and_distribution`](#@Specification_1_initialize_fee_collection_and_distribution) - - [Function `upgrade_burn_percentage`](#@Specification_1_upgrade_burn_percentage) - - [Function `register_proposer_for_fee_collection`](#@Specification_1_register_proposer_for_fee_collection) - - [Function `burn_coin_fraction`](#@Specification_1_burn_coin_fraction) - - [Function `process_collected_fees`](#@Specification_1_process_collected_fees) - [Function `burn_fee`](#@Specification_1_burn_fee) - [Function 
`mint_and_refund`](#@Specification_1_mint_and_refund) - - [Function `collect_fee`](#@Specification_1_collect_fee) - [Function `store_aptos_coin_burn_cap`](#@Specification_1_store_aptos_coin_burn_cap) - [Function `store_aptos_coin_mint_cap`](#@Specification_1_store_aptos_coin_mint_cap) - - [Function `initialize_storage_refund`](#@Specification_1_initialize_storage_refund) - [Function `emit_fee_statement`](#@Specification_1_emit_fee_statement) + - [Function `initialize_fee_collection_and_distribution`](#@Specification_1_initialize_fee_collection_and_distribution) + - [Function `initialize_storage_refund`](#@Specification_1_initialize_storage_refund)
use 0x1::aptos_account;
@@ -57,40 +42,11 @@ This module provides an interface to burn or collect and redistribute transactio
 use 0x1::fungible_asset;
 use 0x1::option;
 use 0x1::signer;
-use 0x1::stake;
 use 0x1::system_addresses;
 
- - -## Resource `CopyCapabilitiesOneShot` - -The one shot copy capabilities call - - -
struct CopyCapabilitiesOneShot has key
-
- - - -
-Fields - - -
-
-dummy_field: bool -
-
- -
-
- - -
- ## Resource `AptosCoinCapabilities` @@ -173,47 +129,6 @@ Stores mint capability to mint the refunds. - - - - -## Resource `CollectedFeesPerBlock` - -Stores information about the block proposer and the amount of fees -collected when executing the block. - - -
struct CollectedFeesPerBlock has key
-
- - - -
-Fields - - -
-
-amount: coin::AggregatableCoin<aptos_coin::AptosCoin> -
-
- -
-
-proposer: option::Option<address> -
-
- -
-
-burn_percentage: u8 -
-
- -
-
- -
@@ -287,36 +202,60 @@ This is meant to emitted as a module event. - + -## Constants +## Resource `CollectedFeesPerBlock` +DEPRECATED: Stores information about the block proposer and the amount of fees +collected when executing the block. - -Gas fees are already being collected and the struct holding -information about collected amounts is already published. +
#[deprecated]
+struct CollectedFeesPerBlock has key
+
-
const EALREADY_COLLECTING_FEES: u64 = 1;
-
+
+Fields - +
+
+amount: coin::AggregatableCoin<aptos_coin::AptosCoin> +
+
+
+
+proposer: option::Option<address> +
+
+
+
+burn_percentage: u8 +
+
-
const EATOMIC_BRIDGE_NOT_ENABLED: u64 = 6;
-
+
+
+ + +
+ + +## Constants - + +Gas fees are already being collected and the struct holding +information about collected amounts is already published. -
const ECOPY_CAPS_SHOT: u64 = 7;
+
const EALREADY_COLLECTING_FEES: u64 = 1;
 
@@ -340,15 +279,6 @@ The burn percentage is out of range [0, 100]. - - - - -
const ENATIVE_BRIDGE_NOT_ENABLED: u64 = 8;
-
- - - No longer supported. @@ -359,295 +289,6 @@ No longer supported. - - -## Function `initialize_fee_collection_and_distribution` - -Initializes the resource storing information about gas fees collection and -distribution. Should be called by on-chain governance. - - -
public fun initialize_fee_collection_and_distribution(aptos_framework: &signer, burn_percentage: u8)
-
- - - -
-Implementation - - -
public fun initialize_fee_collection_and_distribution(aptos_framework: &signer, burn_percentage: u8) {
-    system_addresses::assert_aptos_framework(aptos_framework);
-    assert!(
-        !exists<CollectedFeesPerBlock>(@aptos_framework),
-        error::already_exists(EALREADY_COLLECTING_FEES)
-    );
-    assert!(burn_percentage <= 100, error::out_of_range(EINVALID_BURN_PERCENTAGE));
-
-    // Make sure stakng module is aware of transaction fees collection.
-    stake::initialize_validator_fees(aptos_framework);
-
-    // Initially, no fees are collected and the block proposer is not set.
-    let collected_fees = CollectedFeesPerBlock {
-        amount: coin::initialize_aggregatable_coin(aptos_framework),
-        proposer: option::none(),
-        burn_percentage,
-    };
-    move_to(aptos_framework, collected_fees);
-}
-
- - - -
- - - -## Function `is_fees_collection_enabled` - - - -
fun is_fees_collection_enabled(): bool
-
- - - -
-Implementation - - -
fun is_fees_collection_enabled(): bool {
-    exists<CollectedFeesPerBlock>(@aptos_framework)
-}
-
- - - -
- - - -## Function `upgrade_burn_percentage` - -Sets the burn percentage for collected fees to a new value. Should be called by on-chain governance. - - -
public fun upgrade_burn_percentage(aptos_framework: &signer, new_burn_percentage: u8)
-
- - - -
-Implementation - - -
public fun upgrade_burn_percentage(
-    aptos_framework: &signer,
-    new_burn_percentage: u8
-) acquires AptosCoinCapabilities, CollectedFeesPerBlock {
-    system_addresses::assert_aptos_framework(aptos_framework);
-    assert!(new_burn_percentage <= 100, error::out_of_range(EINVALID_BURN_PERCENTAGE));
-
-    // Prior to upgrading the burn percentage, make sure to process collected
-    // fees. Otherwise we would use the new (incorrect) burn_percentage when
-    // processing fees later!
-    process_collected_fees();
-
-    if (is_fees_collection_enabled()) {
-        // Upgrade has no effect unless fees are being collected.
-        let burn_percentage = &mut borrow_global_mut<CollectedFeesPerBlock>(@aptos_framework).burn_percentage;
-        *burn_percentage = new_burn_percentage
-    }
-}
-
- - - -
- - - -## Function `register_proposer_for_fee_collection` - -Registers the proposer of the block for gas fees collection. This function -can only be called at the beginning of the block. - - -
public(friend) fun register_proposer_for_fee_collection(proposer_addr: address)
-
- - - -
-Implementation - - -
public(friend) fun register_proposer_for_fee_collection(proposer_addr: address) acquires CollectedFeesPerBlock {
-    if (is_fees_collection_enabled()) {
-        let collected_fees = borrow_global_mut<CollectedFeesPerBlock>(@aptos_framework);
-        let _ = option::swap_or_fill(&mut collected_fees.proposer, proposer_addr);
-    }
-}
-
- - - -
- - - -## Function `burn_coin_fraction` - -Burns a specified fraction of the coin. - - -
fun burn_coin_fraction(coin: &mut coin::Coin<aptos_coin::AptosCoin>, burn_percentage: u8)
-
- - - -
-Implementation - - -
fun burn_coin_fraction(coin: &mut Coin<AptosCoin>, burn_percentage: u8) acquires AptosCoinCapabilities {
-    assert!(burn_percentage <= 100, error::out_of_range(EINVALID_BURN_PERCENTAGE));
-
-    let collected_amount = coin::value(coin);
-    spec {
-        // We assume that `burn_percentage * collected_amount` does not overflow.
-        assume burn_percentage * collected_amount <= MAX_U64;
-    };
-    let amount_to_burn = (burn_percentage as u64) * collected_amount / 100;
-    if (amount_to_burn > 0) {
-        let coin_to_burn = coin::extract(coin, amount_to_burn);
-        coin::burn(
-            coin_to_burn,
-            &borrow_global<AptosCoinCapabilities>(@aptos_framework).burn_cap,
-        );
-    }
-}
-
- - - -
- - - -## Function `process_collected_fees` - -Calculates the fee which should be distributed to the block proposer at the -end of an epoch, and records it in the system. This function can only be called -at the beginning of the block or during reconfiguration. - - -
public(friend) fun process_collected_fees()
-
- - - -
-Implementation - - -
public(friend) fun process_collected_fees() acquires AptosCoinCapabilities, CollectedFeesPerBlock {
-    if (!is_fees_collection_enabled()) {
-        return
-    };
-    let collected_fees = borrow_global_mut<CollectedFeesPerBlock>(@aptos_framework);
-
-    // If there are no collected fees, only unset the proposer. See the rationale for
-    // setting proposer to option::none() below.
-    if (coin::is_aggregatable_coin_zero(&collected_fees.amount)) {
-        if (option::is_some(&collected_fees.proposer)) {
-            let _ = option::extract(&mut collected_fees.proposer);
-        };
-        return
-    };
-
-    // Otherwise get the collected fee, and check if it can distributed later.
-    let coin = coin::drain_aggregatable_coin(&mut collected_fees.amount);
-    if (option::is_some(&collected_fees.proposer)) {
-        // Extract the address of proposer here and reset it to option::none(). This
-        // is particularly useful to avoid any undesired side-effects where coins are
-        // collected but never distributed or distributed to the wrong account.
-        // With this design, processing collected fees enforces that all fees will be burnt
-        // unless the proposer is specified in the block prologue. When we have a governance
-        // proposal that triggers reconfiguration, we distribute pending fees and burn the
-        // fee for the proposal. Otherwise, that fee would be leaked to the next block.
-        let proposer = option::extract(&mut collected_fees.proposer);
-
-        // Since the block can be produced by the VM itself, we have to make sure we catch
-        // this case.
-        if (proposer == @vm_reserved) {
-            burn_coin_fraction(&mut coin, 100);
-            coin::destroy_zero(coin);
-            return
-        };
-
-        burn_coin_fraction(&mut coin, collected_fees.burn_percentage);
-        stake::add_transaction_fee(proposer, coin);
-        return
-    };
-
-    // If checks did not pass, simply burn all collected coins and return none.
-    burn_coin_fraction(&mut coin, 100);
-    coin::destroy_zero(coin)
-}
-
- - - -
- - - -## Function `burn_from` - -Burns a specified amount of AptosCoin from an address. - -@param core_resource The signer representing the core resource account. -@param account The address from which to burn AptosCoin. -@param fee The amount of AptosCoin to burn. -@abort If the burn capability is not available. - - -
public fun burn_from(aptos_framework: &signer, account: address, fee: u64)
-
- - - -
-Implementation - - -
public fun burn_from(aptos_framework: &signer, account: address, fee: u64) acquires AptosFABurnCapabilities, AptosCoinCapabilities {
-    system_addresses::assert_aptos_framework(aptos_framework);
-    if (exists<AptosFABurnCapabilities>(@aptos_framework)) {
-        let burn_ref = &borrow_global<AptosFABurnCapabilities>(@aptos_framework).burn_ref;
-        aptos_account::burn_from_fungible_store(burn_ref, account, fee);
-    } else {
-        let burn_cap = &borrow_global<AptosCoinCapabilities>(@aptos_framework).burn_cap;
-        if (features::operations_default_to_fa_apt_store_enabled()) {
-            let (burn_ref, burn_receipt) = coin::get_paired_burn_ref(burn_cap);
-            aptos_account::burn_from_fungible_store(&burn_ref, account, fee);
-            coin::return_paired_burn_ref(burn_ref, burn_receipt);
-        } else {
-            coin::burn_from<AptosCoin>(
-                account,
-                fee,
-                burn_cap,
-            );
-        };
-    };
-}
-
- - - -
- ## Function `burn_fee` @@ -667,15 +308,15 @@ Burn transaction fees in epilogue.
public(friend) fun burn_fee(account: address, fee: u64) acquires AptosFABurnCapabilities, AptosCoinCapabilities {
     if (exists<AptosFABurnCapabilities>(@aptos_framework)) {
         let burn_ref = &borrow_global<AptosFABurnCapabilities>(@aptos_framework).burn_ref;
-        aptos_account::burn_from_fungible_store(burn_ref, account, fee);
+        aptos_account::burn_from_fungible_store_for_gas(burn_ref, account, fee);
     } else {
         let burn_cap = &borrow_global<AptosCoinCapabilities>(@aptos_framework).burn_cap;
         if (features::operations_default_to_fa_apt_store_enabled()) {
             let (burn_ref, burn_receipt) = coin::get_paired_burn_ref(burn_cap);
-            aptos_account::burn_from_fungible_store(&burn_ref, account, fee);
+            aptos_account::burn_from_fungible_store_for_gas(&burn_ref, account, fee);
             coin::return_paired_burn_ref(burn_ref, burn_receipt);
         } else {
-            coin::burn_from<AptosCoin>(
+            coin::burn_from_for_gas<AptosCoin>(
                 account,
                 fee,
                 burn_cap,
@@ -708,38 +349,7 @@ Mint refund in epilogue.
 
public(friend) fun mint_and_refund(account: address, refund: u64) acquires AptosCoinMintCapability {
     let mint_cap = &borrow_global<AptosCoinMintCapability>(@aptos_framework).mint_cap;
     let refund_coin = coin::mint(refund, mint_cap);
-    coin::force_deposit(account, refund_coin);
-}
-
- - - - - - - -## Function `collect_fee` - -Collect transaction fees in epilogue. - - -
public(friend) fun collect_fee(account: address, fee: u64)
-
- - - -
-Implementation - - -
public(friend) fun collect_fee(account: address, fee: u64) acquires CollectedFeesPerBlock {
-    let collected_fees = borrow_global_mut<CollectedFeesPerBlock>(@aptos_framework);
-
-    // Here, we are always optimistic and always collect fees. If the proposer is not set,
-    // or we cannot redistribute fees later for some reason (e.g. account cannot receive AptoCoin)
-    // we burn them all at once. This way we avoid having a check for every transaction epilogue.
-    let collected_amount = &mut collected_fees.amount;
-    coin::collect_into_aggregatable_coin<AptosCoin>(account, fee, collected_amount);
+    coin::deposit_for_gas_fee(account, refund_coin);
 }
 
@@ -835,15 +445,13 @@ Only called during genesis.
- + -## Function `copy_capabilities_for_bridge` +## Function `emit_fee_statement` -Copy Mint and Burn capabilities over to bridge -Can only be called once after which it will assert -
public fun copy_capabilities_for_bridge(aptos_framework: &signer): (coin::MintCapability<aptos_coin::AptosCoin>, coin::BurnCapability<aptos_coin::AptosCoin>)
+
fun emit_fee_statement(fee_statement: transaction_fee::FeeStatement)
 
@@ -852,16 +460,8 @@ Can only be called once after which it will assert Implementation -
public fun copy_capabilities_for_bridge(aptos_framework: &signer) : (MintCapability<AptosCoin>, BurnCapability<AptosCoin>)
-acquires AptosCoinCapabilities, AptosCoinMintCapability {
-    system_addresses::assert_aptos_framework(aptos_framework);
-    assert!(features::abort_atomic_bridge_enabled(), EATOMIC_BRIDGE_NOT_ENABLED);
-    assert!(!exists<CopyCapabilitiesOneShot>(@aptos_framework), ECOPY_CAPS_SHOT);
-    move_to(aptos_framework, CopyCapabilitiesOneShot{});
-    (
-        borrow_global<AptosCoinMintCapability>(@aptos_framework).mint_cap,
-        borrow_global<AptosCoinCapabilities>(@aptos_framework).burn_cap
-    )
+
fun emit_fee_statement(fee_statement: FeeStatement) {
+    event::emit(fee_statement)
 }
 
@@ -869,15 +469,15 @@ Can only be called once after which it will assert - + -## Function `copy_capabilities_for_native_bridge` +## Function `initialize_fee_collection_and_distribution` -Copy Mint and Burn capabilities over to bridge -Can only be called once after which it will assert +DEPRECATED -
public fun copy_capabilities_for_native_bridge(aptos_framework: &signer): (coin::MintCapability<aptos_coin::AptosCoin>, coin::BurnCapability<aptos_coin::AptosCoin>)
+
#[deprecated]
+public fun initialize_fee_collection_and_distribution(_aptos_framework: &signer, _burn_percentage: u8)
 
@@ -886,16 +486,8 @@ Can only be called once after which it will assert Implementation -
public fun copy_capabilities_for_native_bridge(aptos_framework: &signer) : (MintCapability<AptosCoin>, BurnCapability<AptosCoin>)
-acquires AptosCoinCapabilities, AptosCoinMintCapability {
-    system_addresses::assert_aptos_framework(aptos_framework);
-    assert!(features::abort_native_bridge_enabled(), ENATIVE_BRIDGE_NOT_ENABLED);
-    assert!(!exists<CopyCapabilitiesOneShot>(@aptos_framework), ECOPY_CAPS_SHOT);
-    move_to(aptos_framework, CopyCapabilitiesOneShot{});
-    (
-        borrow_global<AptosCoinMintCapability>(@aptos_framework).mint_cap,
-        borrow_global<AptosCoinCapabilities>(@aptos_framework).burn_cap
-    )
+
public fun initialize_fee_collection_and_distribution(_aptos_framework: &signer, _burn_percentage: u8) {
+    abort error::not_implemented(ENO_LONGER_SUPPORTED)
 }
 
@@ -903,14 +495,15 @@ Can only be called once after which it will assert - + -## Function `initialize_storage_refund` +## Function `upgrade_burn_percentage` +DEPRECATED
#[deprecated]
-public fun initialize_storage_refund(_: &signer)
+public fun upgrade_burn_percentage(_aptos_framework: &signer, _new_burn_percentage: u8)
 
@@ -919,7 +512,10 @@ Can only be called once after which it will assert Implementation -
public fun initialize_storage_refund(_: &signer) {
+
public fun upgrade_burn_percentage(
+    _aptos_framework: &signer,
+    _new_burn_percentage: u8
+) {
     abort error::not_implemented(ENO_LONGER_SUPPORTED)
 }
 
@@ -928,13 +524,14 @@ Can only be called once after which it will assert - + -## Function `emit_fee_statement` +## Function `initialize_storage_refund` -
fun emit_fee_statement(fee_statement: transaction_fee::FeeStatement)
+
#[deprecated]
+public fun initialize_storage_refund(_: &signer)
 
@@ -943,8 +540,8 @@ Can only be called once after which it will assert Implementation -
fun emit_fee_statement(fee_statement: FeeStatement) {
-    event::emit(fee_statement)
+
public fun initialize_storage_refund(_: &signer) {
+    abort error::not_implemented(ENO_LONGER_SUPPORTED)
 }
 
@@ -1039,7 +636,8 @@ Can only be called once after which it will assert ### Resource `CollectedFeesPerBlock` -
struct CollectedFeesPerBlock has key
+
#[deprecated]
+struct CollectedFeesPerBlock has key
 
@@ -1073,178 +671,6 @@ Can only be called once after which it will assert - - -### Function `initialize_fee_collection_and_distribution` - - -
public fun initialize_fee_collection_and_distribution(aptos_framework: &signer, burn_percentage: u8)
-
- - - - -
// This enforces high-level requirement 2:
-aborts_if exists<CollectedFeesPerBlock>(@aptos_framework);
-aborts_if burn_percentage > 100;
-let aptos_addr = signer::address_of(aptos_framework);
-// This enforces high-level requirement 3:
-aborts_if !system_addresses::is_aptos_framework_address(aptos_addr);
-aborts_if exists<ValidatorFees>(aptos_addr);
-include system_addresses::AbortsIfNotAptosFramework { account: aptos_framework };
-include aggregator_factory::CreateAggregatorInternalAbortsIf;
-aborts_if exists<CollectedFeesPerBlock>(aptos_addr);
-ensures exists<ValidatorFees>(aptos_addr);
-ensures exists<CollectedFeesPerBlock>(aptos_addr);
-
- - - - - -### Function `upgrade_burn_percentage` - - -
public fun upgrade_burn_percentage(aptos_framework: &signer, new_burn_percentage: u8)
-
- - - - -
aborts_if new_burn_percentage > 100;
-let aptos_addr = signer::address_of(aptos_framework);
-aborts_if !system_addresses::is_aptos_framework_address(aptos_addr);
-// This enforces high-level requirement 5 and high-level requirement 6:
-include ProcessCollectedFeesRequiresAndEnsures;
-ensures exists<CollectedFeesPerBlock>(@aptos_framework) ==>
-    global<CollectedFeesPerBlock>(@aptos_framework).burn_percentage == new_burn_percentage;
-
- - - - - -### Function `register_proposer_for_fee_collection` - - -
public(friend) fun register_proposer_for_fee_collection(proposer_addr: address)
-
- - - - -
aborts_if false;
-// This enforces high-level requirement 6:
-ensures is_fees_collection_enabled() ==>
-    option::spec_borrow(global<CollectedFeesPerBlock>(@aptos_framework).proposer) == proposer_addr;
-
- - - - - -### Function `burn_coin_fraction` - - -
fun burn_coin_fraction(coin: &mut coin::Coin<aptos_coin::AptosCoin>, burn_percentage: u8)
-
- - - - -
requires burn_percentage <= 100;
-requires exists<AptosCoinCapabilities>(@aptos_framework);
-requires exists<CoinInfo<AptosCoin>>(@aptos_framework);
-let amount_to_burn = (burn_percentage * coin::value(coin)) / 100;
-include amount_to_burn > 0 ==> coin::CoinSubAbortsIf<AptosCoin> { amount: amount_to_burn };
-ensures coin.value == old(coin).value - amount_to_burn;
-
- - - - - - - -
fun collectedFeesAggregator(): AggregatableCoin<AptosCoin> {
-   global<CollectedFeesPerBlock>(@aptos_framework).amount
-}
-
- - - - - - - -
schema RequiresCollectedFeesPerValueLeqBlockAptosSupply {
-    let maybe_supply = coin::get_coin_supply_opt<AptosCoin>();
-    requires
-        (is_fees_collection_enabled() && option::is_some(maybe_supply)) ==>
-            (aggregator::spec_aggregator_get_val(global<CollectedFeesPerBlock>(@aptos_framework).amount.value) <=
-                optional_aggregator::optional_aggregator_value(
-                    option::spec_borrow(coin::get_coin_supply_opt<AptosCoin>())
-                ));
-}
-
- - - - - - - -
schema ProcessCollectedFeesRequiresAndEnsures {
-    requires exists<AptosCoinCapabilities>(@aptos_framework);
-    requires exists<stake::ValidatorFees>(@aptos_framework);
-    requires exists<CoinInfo<AptosCoin>>(@aptos_framework);
-    include RequiresCollectedFeesPerValueLeqBlockAptosSupply;
-    aborts_if false;
-    let collected_fees = global<CollectedFeesPerBlock>(@aptos_framework);
-    let post post_collected_fees = global<CollectedFeesPerBlock>(@aptos_framework);
-    let pre_amount = aggregator::spec_aggregator_get_val(collected_fees.amount.value);
-    let post post_amount = aggregator::spec_aggregator_get_val(post_collected_fees.amount.value);
-    let fees_table = global<stake::ValidatorFees>(@aptos_framework).fees_table;
-    let post post_fees_table = global<stake::ValidatorFees>(@aptos_framework).fees_table;
-    let proposer = option::spec_borrow(collected_fees.proposer);
-    let fee_to_add = pre_amount - pre_amount * collected_fees.burn_percentage / 100;
-    ensures is_fees_collection_enabled() ==> option::spec_is_none(post_collected_fees.proposer) && post_amount == 0;
-    ensures is_fees_collection_enabled() && aggregator::spec_read(collected_fees.amount.value) > 0 &&
-        option::spec_is_some(collected_fees.proposer) ==>
-        if (proposer != @vm_reserved) {
-            if (table::spec_contains(fees_table, proposer)) {
-                table::spec_get(post_fees_table, proposer).value == table::spec_get(
-                    fees_table,
-                    proposer
-                ).value + fee_to_add
-            } else {
-                table::spec_get(post_fees_table, proposer).value == fee_to_add
-            }
-        } else {
-            option::spec_is_none(post_collected_fees.proposer) && post_amount == 0
-        };
-}
-
- - - - - -### Function `process_collected_fees` - - -
public(friend) fun process_collected_fees()
-
- - - - -
// This enforces high-level requirement 6:
-include ProcessCollectedFeesRequiresAndEnsures;
-
- - - ### Function `burn_fee` @@ -1310,38 +736,6 @@ Can only be called once after which it will assert - - -### Function `collect_fee` - - -
public(friend) fun collect_fee(account: address, fee: u64)
-
- - - - -
pragma verify = false;
-let collected_fees = global<CollectedFeesPerBlock>(@aptos_framework).amount;
-let aggr = collected_fees.value;
-let coin_store = global<coin::CoinStore<AptosCoin>>(account);
-aborts_if !exists<CollectedFeesPerBlock>(@aptos_framework);
-aborts_if fee > 0 && !exists<coin::CoinStore<AptosCoin>>(account);
-aborts_if fee > 0 && coin_store.coin.value < fee;
-aborts_if fee > 0 && aggregator::spec_aggregator_get_val(aggr)
-    + fee > aggregator::spec_get_limit(aggr);
-aborts_if fee > 0 && aggregator::spec_aggregator_get_val(aggr)
-    + fee > MAX_U128;
-let post post_coin_store = global<coin::CoinStore<AptosCoin>>(account);
-let post post_collected_fees = global<CollectedFeesPerBlock>(@aptos_framework).amount;
-ensures post_coin_store.coin.value == coin_store.coin.value - fee;
-ensures aggregator::spec_aggregator_get_val(post_collected_fees.value) == aggregator::spec_aggregator_get_val(
-    aggr
-) + fee;
-
- - - ### Function `store_aptos_coin_burn_cap` @@ -1386,34 +780,45 @@ Aborts if + -### Function `initialize_storage_refund` +### Function `emit_fee_statement` -
#[deprecated]
-public fun initialize_storage_refund(_: &signer)
+
fun emit_fee_statement(fee_statement: transaction_fee::FeeStatement)
 
-Historical. Aborts. +Aborts if module event feature is not enabled. -
aborts_if true;
+
+
+### Function `initialize_fee_collection_and_distribution`
+
+
+
#[deprecated]
+public fun initialize_fee_collection_and_distribution(_aptos_framework: &signer, _burn_percentage: u8)
 
- -### Function `emit_fee_statement` + +### Function `initialize_storage_refund` -
fun emit_fee_statement(fee_statement: transaction_fee::FeeStatement)
+
+
#[deprecated]
+public fun initialize_storage_refund(_: &signer)
 
-Aborts if module event feature is not enabled. +Historical. Aborts. + + +
aborts_if true;
+
[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/aptos-framework/doc/transaction_validation.md b/aptos-move/framework/aptos-framework/doc/transaction_validation.md index 640631f7f42da..0986c0f52f816 100644 --- a/aptos-move/framework/aptos-framework/doc/transaction_validation.md +++ b/aptos-move/framework/aptos-framework/doc/transaction_validation.md @@ -5,46 +5,146 @@ +- [Enum `ReplayProtector`](#0x1_transaction_validation_ReplayProtector) - [Resource `TransactionValidation`](#0x1_transaction_validation_TransactionValidation) +- [Struct `GasPermission`](#0x1_transaction_validation_GasPermission) - [Constants](#@Constants_0) +- [Function `grant_gas_permission`](#0x1_transaction_validation_grant_gas_permission) +- [Function `revoke_gas_permission`](#0x1_transaction_validation_revoke_gas_permission) - [Function `initialize`](#0x1_transaction_validation_initialize) +- [Function `allow_missing_txn_authentication_key`](#0x1_transaction_validation_allow_missing_txn_authentication_key) - [Function `prologue_common`](#0x1_transaction_validation_prologue_common) +- [Function `check_for_replay_protection_regular_txn`](#0x1_transaction_validation_check_for_replay_protection_regular_txn) +- [Function `check_for_replay_protection_orderless_txn`](#0x1_transaction_validation_check_for_replay_protection_orderless_txn) - [Function `script_prologue`](#0x1_transaction_validation_script_prologue) +- [Function `script_prologue_extended`](#0x1_transaction_validation_script_prologue_extended) - [Function `multi_agent_script_prologue`](#0x1_transaction_validation_multi_agent_script_prologue) +- [Function `multi_agent_script_prologue_extended`](#0x1_transaction_validation_multi_agent_script_prologue_extended) - [Function `multi_agent_common_prologue`](#0x1_transaction_validation_multi_agent_common_prologue) - [Function `fee_payer_script_prologue`](#0x1_transaction_validation_fee_payer_script_prologue) +- [Function `fee_payer_script_prologue_extended`](#0x1_transaction_validation_fee_payer_script_prologue_extended) - [Function `epilogue`](#0x1_transaction_validation_epilogue) +- [Function `epilogue_extended`](#0x1_transaction_validation_epilogue_extended) - [Function `epilogue_gas_payer`](#0x1_transaction_validation_epilogue_gas_payer) +- [Function `epilogue_gas_payer_extended`](#0x1_transaction_validation_epilogue_gas_payer_extended) +- [Function `skip_auth_key_check`](#0x1_transaction_validation_skip_auth_key_check) +- [Function `skip_gas_payment`](#0x1_transaction_validation_skip_gas_payment) +- [Function `unified_prologue`](#0x1_transaction_validation_unified_prologue) +- [Function `unified_prologue_fee_payer`](#0x1_transaction_validation_unified_prologue_fee_payer) +- [Function `unified_epilogue`](#0x1_transaction_validation_unified_epilogue) +- [Function `unified_prologue_v2`](#0x1_transaction_validation_unified_prologue_v2) +- [Function `unified_prologue_fee_payer_v2`](#0x1_transaction_validation_unified_prologue_fee_payer_v2) +- [Function `unified_epilogue_v2`](#0x1_transaction_validation_unified_epilogue_v2) - [Specification](#@Specification_1) - [High-level Requirements](#high-level-req) - [Module-level Specification](#module-level-spec) + - [Function `grant_gas_permission`](#@Specification_1_grant_gas_permission) + - [Function `revoke_gas_permission`](#@Specification_1_revoke_gas_permission) - [Function `initialize`](#@Specification_1_initialize) - [Function `prologue_common`](#@Specification_1_prologue_common) + - [Function 
`check_for_replay_protection_regular_txn`](#@Specification_1_check_for_replay_protection_regular_txn) + - [Function `check_for_replay_protection_orderless_txn`](#@Specification_1_check_for_replay_protection_orderless_txn) - [Function `script_prologue`](#@Specification_1_script_prologue) + - [Function `script_prologue_extended`](#@Specification_1_script_prologue_extended) - [Function `multi_agent_script_prologue`](#@Specification_1_multi_agent_script_prologue) + - [Function `multi_agent_script_prologue_extended`](#@Specification_1_multi_agent_script_prologue_extended) - [Function `multi_agent_common_prologue`](#@Specification_1_multi_agent_common_prologue) - [Function `fee_payer_script_prologue`](#@Specification_1_fee_payer_script_prologue) + - [Function `fee_payer_script_prologue_extended`](#@Specification_1_fee_payer_script_prologue_extended) - [Function `epilogue`](#@Specification_1_epilogue) + - [Function `epilogue_extended`](#@Specification_1_epilogue_extended) - [Function `epilogue_gas_payer`](#@Specification_1_epilogue_gas_payer) + - [Function `epilogue_gas_payer_extended`](#@Specification_1_epilogue_gas_payer_extended) + - [Function `unified_prologue`](#@Specification_1_unified_prologue) + - [Function `unified_prologue_fee_payer`](#@Specification_1_unified_prologue_fee_payer) + - [Function `unified_epilogue`](#@Specification_1_unified_epilogue) + - [Function `unified_prologue_v2`](#@Specification_1_unified_prologue_v2) + - [Function `unified_prologue_fee_payer_v2`](#@Specification_1_unified_prologue_fee_payer_v2) + - [Function `unified_epilogue_v2`](#@Specification_1_unified_epilogue_v2)
use 0x1::account;
+use 0x1::account_abstraction;
 use 0x1::aptos_account;
 use 0x1::aptos_coin;
-use 0x1::bcs;
 use 0x1::chain_id;
 use 0x1::coin;
+use 0x1::create_signer;
 use 0x1::error;
 use 0x1::features;
-use 0x1::governed_gas_pool;
+use 0x1::nonce_validation;
+use 0x1::option;
+use 0x1::permissioned_signer;
 use 0x1::signer;
 use 0x1::system_addresses;
 use 0x1::timestamp;
 use 0x1::transaction_fee;
+use 0x1::vector;
 
+ + +## Enum `ReplayProtector` + + + +
enum ReplayProtector
+
+ + + +
+Variants
+
+- `Nonce`
+    - `0: u64`
+- `SequenceNumber`
+    - `0: u64`
+
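In the prologues below, a regular transaction supplies `ReplayProtector::SequenceNumber(txn_sequence_number)`, which is validated against the sender's account sequence number, while an orderless transaction supplies `ReplayProtector::Nonce(nonce)`, which is checked and recorded through `nonce_validation::check_and_insert_nonce`.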
+ ## Resource `TransactionValidation` @@ -102,6 +202,33 @@ correct chain-specific prologue and epilogue functions + + + + +## Struct `GasPermission` + + + +
struct GasPermission has copy, drop, store
+
+ + + +
+Fields
+
+- `dummy_field: bool`
+
@@ -129,6 +256,15 @@ Transaction exceeded its allocated max gas + + + + +
const MAX_EXPIRATION_TIME_SECONDS_FOR_ORDERLESS_TXNS: u64 = 65;
+
+ + + @@ -177,6 +313,15 @@ important to the semantics of the system. + + + + +
const PROLOGUE_ENONCE_ALREADY_USED: u64 = 1012;
+
+ + + @@ -213,6 +358,15 @@ important to the semantics of the system. + + + + +
const PROLOGUE_ETRANSACTION_EXPIRATION_TOO_FAR_IN_FUTURE: u64 = 1013;
+
+ + + @@ -222,6 +376,76 @@ important to the semantics of the system. + + + + +
const PROLOGUE_PERMISSIONED_GAS_LIMIT_INSUFFICIENT: u64 = 1011;
+
+ + + + + +## Function `grant_gas_permission` + +Permission management + +The master signer grants a permissioned signer the ability to consume a given amount of gas, denominated in octas. + + +
public fun grant_gas_permission(master: &signer, permissioned: &signer, gas_amount: u64)
+
+ + + +
+Implementation + + +
public fun grant_gas_permission(
+    master: &signer,
+    permissioned: &signer,
+    gas_amount: u64
+) {
+    permissioned_signer::authorize_increase(
+        master,
+        permissioned,
+        (gas_amount as u256),
+        GasPermission {}
+    )
+}
+
+ + + +
+ + + +## Function `revoke_gas_permission` + +Removes the gas permission from the permissioned signer. + + +
public fun revoke_gas_permission(permissioned: &signer)
+
+ + + +
+Implementation + + +
public fun revoke_gas_permission(permissioned: &signer) {
+    permissioned_signer::revoke_permission(permissioned, GasPermission {})
+}
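A minimal usage sketch (editorial addition, not part of this diff): the module address `0xcafe`, the wrapper names, and the 1_000_000 octa limit are illustrative; only the two `transaction_validation` calls are taken from the public functions documented above.

```move
module 0xcafe::gas_allowance_example {
    use aptos_framework::transaction_validation;

    /// Allow `permissioned` to pay for at most 1_000_000 octas of gas on behalf of `master`.
    public fun grant(master: &signer, permissioned: &signer) {
        transaction_validation::grant_gas_permission(master, permissioned, 1_000_000);
    }

    /// Remove the gas allowance from the permissioned signer again.
    public fun revoke(permissioned: &signer) {
        transaction_validation::revoke_gas_permission(permissioned);
    }
}
```

The allowance is checked against the maximum transaction fee in `prologue_common` and consumed in `unified_epilogue_v2`, which charges the burned fee against the permission and credits storage refunds back to it.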
+
+ + + +
+ ## Function `initialize` @@ -262,6 +486,32 @@ Only called during genesis to initialize system resources for this module. + + + + +## Function `allow_missing_txn_authentication_key` + + + +
fun allow_missing_txn_authentication_key(transaction_sender: address): bool
+
+ + + +
+Implementation + + +
inline fun allow_missing_txn_authentication_key(transaction_sender: address): bool {
+    // Account abstraction (AA) verifies authentication itself.
+    features::is_derivable_account_abstraction_enabled()
+        || (features::is_account_abstraction_enabled() && account_abstraction::using_dispatchable_authenticator(transaction_sender))
+}
+
+ + +
@@ -270,7 +520,7 @@ Only called during genesis to initialize system resources for this module. -
fun prologue_common(sender: signer, gas_payer: address, txn_sequence_number: u64, txn_authentication_key: vector<u8>, txn_gas_price: u64, txn_max_gas_units: u64, txn_expiration_time: u64, chain_id: u8)
+
fun prologue_common(sender: &signer, gas_payer: &signer, replay_protector: transaction_validation::ReplayProtector, txn_authentication_key: option::Option<vector<u8>>, txn_gas_price: u64, txn_max_gas_units: u64, txn_expiration_time: u64, chain_id: u8, is_simulation: bool)
 
@@ -280,36 +530,121 @@ Only called during genesis to initialize system resources for this module.
fun prologue_common(
-    sender: signer,
-    gas_payer: address,
-    txn_sequence_number: u64,
-    txn_authentication_key: vector<u8>,
+    sender: &signer,
+    gas_payer: &signer,
+    replay_protector: ReplayProtector,
+    txn_authentication_key: Option<vector<u8>>,
     txn_gas_price: u64,
     txn_max_gas_units: u64,
     txn_expiration_time: u64,
     chain_id: u8,
+    is_simulation: bool,
 ) {
+    let sender_address = signer::address_of(sender);
+    let gas_payer_address = signer::address_of(gas_payer);
     assert!(
         timestamp::now_seconds() < txn_expiration_time,
         error::invalid_argument(PROLOGUE_ETRANSACTION_EXPIRED),
     );
     assert!(chain_id::get() == chain_id, error::invalid_argument(PROLOGUE_EBAD_CHAIN_ID));
 
-    let transaction_sender = signer::address_of(&sender);
+    // TODO[Orderless]: Here, we are maintaining the same order of validation steps as before orderless txns were introduced.
+    // Ideally, the replay protection check would be done at the end, after the authentication key and gas payment checks.
+
+    // Check if the authentication key is valid
+    if (!skip_auth_key_check(is_simulation, &txn_authentication_key)) {
+        if (option::is_some(&txn_authentication_key)) {
+            assert!(
+                txn_authentication_key == option::some(account::get_authentication_key(sender_address)),
+                error::invalid_argument(PROLOGUE_EINVALID_ACCOUNT_AUTH_KEY),
+            );
+        } else {
+            assert!(
+                allow_missing_txn_authentication_key(sender_address),
+                error::invalid_argument(PROLOGUE_EINVALID_ACCOUNT_AUTH_KEY)
+            );
+        };
+    };
+
+    // Check for replay protection
+    match (replay_protector) {
+        SequenceNumber(txn_sequence_number) => {
+            check_for_replay_protection_regular_txn(
+                sender_address,
+                gas_payer_address,
+                txn_sequence_number,
+            );
+        },
+        Nonce(nonce) => {
+            check_for_replay_protection_orderless_txn(
+                sender_address,
+                nonce,
+                txn_expiration_time,
+            );
+        }
+    };
+
+    // Check if the gas payer has enough balance to pay for the transaction
+    let max_transaction_fee = txn_gas_price * txn_max_gas_units;
+    if (!skip_gas_payment(
+        is_simulation,
+        gas_payer_address
+    )) {
+        assert!(
+            permissioned_signer::check_permission_capacity_above(
+                gas_payer,
+                (max_transaction_fee as u256),
+                GasPermission {}
+            ),
+            error::permission_denied(PROLOGUE_PERMISSIONED_GAS_LIMIT_INSUFFICIENT)
+        );
+        if (features::operations_default_to_fa_apt_store_enabled()) {
+            assert!(
+                aptos_account::is_fungible_balance_at_least(gas_payer_address, max_transaction_fee),
+                error::invalid_argument(PROLOGUE_ECANT_PAY_GAS_DEPOSIT)
+            );
+        } else {
+            assert!(
+                coin::is_balance_at_least<AptosCoin>(gas_payer_address, max_transaction_fee),
+                error::invalid_argument(PROLOGUE_ECANT_PAY_GAS_DEPOSIT)
+            );
+        }
+    };
+}
+
+ + + + + + + +## Function `check_for_replay_protection_regular_txn` + + + +
fun check_for_replay_protection_regular_txn(sender_address: address, gas_payer_address: address, txn_sequence_number: u64)
+
+ + + +
+Implementation + +
fun check_for_replay_protection_regular_txn(
+    sender_address: address,
+    gas_payer_address: address,
+    txn_sequence_number: u64,
+) {
     if (
-        transaction_sender == gas_payer
-            || account::exists_at(transaction_sender)
+        sender_address == gas_payer_address
+            || account::exists_at(sender_address)
             || !features::sponsored_automatic_account_creation_enabled()
             || txn_sequence_number > 0
     ) {
-        assert!(account::exists_at(transaction_sender), error::invalid_argument(PROLOGUE_EACCOUNT_DOES_NOT_EXIST));
-        assert!(
-            txn_authentication_key == account::get_authentication_key(transaction_sender),
-            error::invalid_argument(PROLOGUE_EINVALID_ACCOUNT_AUTH_KEY),
-        );
-
-        let account_sequence_number = account::get_sequence_number(transaction_sender);
+        assert!(account::exists_at(sender_address), error::invalid_argument(PROLOGUE_EACCOUNT_DOES_NOT_EXIST));
+        let account_sequence_number = account::get_sequence_number(sender_address);
         assert!(
             txn_sequence_number < (1u64 << 63),
             error::out_of_range(PROLOGUE_ESEQUENCE_NUMBER_TOO_BIG)
@@ -331,26 +666,40 @@ Only called during genesis to initialize system resources for this module.
             txn_sequence_number == 0,
             error::invalid_argument(PROLOGUE_ESEQUENCE_NUMBER_TOO_NEW)
         );
-
-        assert!(
-            txn_authentication_key == bcs::to_bytes(&transaction_sender),
-            error::invalid_argument(PROLOGUE_EINVALID_ACCOUNT_AUTH_KEY),
-        );
     };
+}
+
- let max_transaction_fee = txn_gas_price * txn_max_gas_units; - if (features::operations_default_to_fa_apt_store_enabled()) { - assert!( - aptos_account::is_fungible_balance_at_least(gas_payer, max_transaction_fee), - error::invalid_argument(PROLOGUE_ECANT_PAY_GAS_DEPOSIT) - ); - } else { - assert!( - coin::is_balance_at_least<AptosCoin>(gas_payer, max_transaction_fee), - error::invalid_argument(PROLOGUE_ECANT_PAY_GAS_DEPOSIT) - ); - } + +
+ + + +## Function `check_for_replay_protection_orderless_txn` + + + +
fun check_for_replay_protection_orderless_txn(sender: address, nonce: u64, txn_expiration_time: u64)
+
+ + + +
+Implementation + + +
fun check_for_replay_protection_orderless_txn(
+    sender: address,
+    nonce: u64,
+    txn_expiration_time: u64,
+) {
+    // prologue_common already checks that current_time < txn_expiration_time, i.e. the transaction has not expired.
+    assert!(
+        txn_expiration_time <= timestamp::now_seconds() + MAX_EXPIRATION_TIME_SECONDS_FOR_ORDERLESS_TXNS,
+        error::invalid_argument(PROLOGUE_ETRANSACTION_EXPIRATION_TOO_FAR_IN_FUTURE),
+    );
+    assert!(nonce_validation::check_and_insert_nonce(sender, nonce, txn_expiration_time), error::invalid_argument(PROLOGUE_ENONCE_ALREADY_USED));
 }
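Worked example (illustrative numbers): with `MAX_EXPIRATION_TIME_SECONDS_FOR_ORDERLESS_TXNS` = 65, if `timestamp::now_seconds()` is 1_000, an orderless transaction is accepted only when 1_000 < `txn_expiration_time` <= 1_065 and its `(sender, nonce)` pair has not been seen before; a later expiration aborts with `PROLOGUE_ETRANSACTION_EXPIRATION_TOO_FAR_IN_FUTURE`, and a reused nonce aborts with `PROLOGUE_ENONCE_ALREADY_USED`.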
 
@@ -383,16 +732,17 @@ Only called during genesis to initialize system resources for this module. chain_id: u8, _script_hash: vector<u8>, ) { - let gas_payer = signer::address_of(&sender); + // prologue_common with is_simulation set to false behaves identically to the original script_prologue function. prologue_common( - sender, - gas_payer, - txn_sequence_number, - txn_public_key, + &sender, + &sender, + ReplayProtector::SequenceNumber(txn_sequence_number), + option::some(txn_public_key), txn_gas_price, txn_max_gas_units, txn_expiration_time, - chain_id + chain_id, + false, ) }
@@ -401,13 +751,13 @@ Only called during genesis to initialize system resources for this module. - + -## Function `multi_agent_script_prologue` +## Function `script_prologue_extended` -
fun multi_agent_script_prologue(sender: signer, txn_sequence_number: u64, txn_sender_public_key: vector<u8>, secondary_signer_addresses: vector<address>, secondary_signer_public_key_hashes: vector<vector<u8>>, txn_gas_price: u64, txn_max_gas_units: u64, txn_expiration_time: u64, chain_id: u8)
+
fun script_prologue_extended(sender: signer, txn_sequence_number: u64, txn_public_key: vector<u8>, txn_gas_price: u64, txn_max_gas_units: u64, txn_expiration_time: u64, chain_id: u8, _script_hash: vector<u8>, is_simulation: bool)
 
@@ -416,29 +766,28 @@ Only called during genesis to initialize system resources for this module. Implementation -
fun multi_agent_script_prologue(
+
fun script_prologue_extended(
     sender: signer,
     txn_sequence_number: u64,
-    txn_sender_public_key: vector<u8>,
-    secondary_signer_addresses: vector<address>,
-    secondary_signer_public_key_hashes: vector<vector<u8>>,
+    txn_public_key: vector<u8>,
     txn_gas_price: u64,
     txn_max_gas_units: u64,
     txn_expiration_time: u64,
     chain_id: u8,
+    _script_hash: vector<u8>,
+    is_simulation: bool,
 ) {
-    let sender_addr = signer::address_of(&sender);
     prologue_common(
-        sender,
-        sender_addr,
-        txn_sequence_number,
-        txn_sender_public_key,
+        &sender,
+        &sender,
+        ReplayProtector::SequenceNumber(txn_sequence_number),
+        option::some(txn_public_key),
         txn_gas_price,
         txn_max_gas_units,
         txn_expiration_time,
         chain_id,
-    );
-    multi_agent_common_prologue(secondary_signer_addresses, secondary_signer_public_key_hashes);
+        is_simulation,
+    )
 }
 
@@ -446,13 +795,13 @@ Only called during genesis to initialize system resources for this module. - + -## Function `multi_agent_common_prologue` +## Function `multi_agent_script_prologue` -
fun multi_agent_common_prologue(secondary_signer_addresses: vector<address>, secondary_signer_public_key_hashes: vector<vector<u8>>)
+
fun multi_agent_script_prologue(sender: signer, txn_sequence_number: u64, txn_sender_public_key: vector<u8>, secondary_signer_addresses: vector<address>, secondary_signer_public_key_hashes: vector<vector<u8>>, txn_gas_price: u64, txn_max_gas_units: u64, txn_expiration_time: u64, chain_id: u8)
 
@@ -461,9 +810,111 @@ Only called during genesis to initialize system resources for this module. Implementation -
fun multi_agent_common_prologue(
+
fun multi_agent_script_prologue(
+    sender: signer,
+    txn_sequence_number: u64,
+    txn_sender_public_key: vector<u8>,
     secondary_signer_addresses: vector<address>,
     secondary_signer_public_key_hashes: vector<vector<u8>>,
+    txn_gas_price: u64,
+    txn_max_gas_units: u64,
+    txn_expiration_time: u64,
+    chain_id: u8,
+) {
+    // prologue_common and multi_agent_common_prologue with is_simulation set to false behave identically to the
+    // original multi_agent_script_prologue function.
+    prologue_common(
+        &sender,
+        &sender,
+        ReplayProtector::SequenceNumber(txn_sequence_number),
+        option::some(txn_sender_public_key),
+        txn_gas_price,
+        txn_max_gas_units,
+        txn_expiration_time,
+        chain_id,
+        false,
+    );
+    multi_agent_common_prologue(
+        secondary_signer_addresses,
+        vector::map(secondary_signer_public_key_hashes, |x| option::some(x)),
+        false
+    );
+}
+
+ + + + + + + +## Function `multi_agent_script_prologue_extended` + + + +
fun multi_agent_script_prologue_extended(sender: signer, txn_sequence_number: u64, txn_sender_public_key: vector<u8>, secondary_signer_addresses: vector<address>, secondary_signer_public_key_hashes: vector<vector<u8>>, txn_gas_price: u64, txn_max_gas_units: u64, txn_expiration_time: u64, chain_id: u8, is_simulation: bool)
+
+ + + +
+Implementation + + +
fun multi_agent_script_prologue_extended(
+    sender: signer,
+    txn_sequence_number: u64,
+    txn_sender_public_key: vector<u8>,
+    secondary_signer_addresses: vector<address>,
+    secondary_signer_public_key_hashes: vector<vector<u8>>,
+    txn_gas_price: u64,
+    txn_max_gas_units: u64,
+    txn_expiration_time: u64,
+    chain_id: u8,
+    is_simulation: bool,
+) {
+    prologue_common(
+        &sender,
+        &sender,
+        ReplayProtector::SequenceNumber(txn_sequence_number),
+        option::some(txn_sender_public_key),
+        txn_gas_price,
+        txn_max_gas_units,
+        txn_expiration_time,
+        chain_id,
+        is_simulation,
+    );
+    multi_agent_common_prologue(
+        secondary_signer_addresses,
+        vector::map(secondary_signer_public_key_hashes, |x| option::some(x)),
+        is_simulation
+    );
+}
+
+ + + +
+ + + +## Function `multi_agent_common_prologue` + + + +
fun multi_agent_common_prologue(secondary_signer_addresses: vector<address>, secondary_signer_public_key_hashes: vector<option::Option<vector<u8>>>, is_simulation: bool)
+
+ + + +
+Implementation + + +
fun multi_agent_common_prologue(
+    secondary_signer_addresses: vector<address>,
+    secondary_signer_public_key_hashes: vector<Option<vector<u8>>>,
+    is_simulation: bool,
 ) {
     let num_secondary_signers = vector::length(&secondary_signer_addresses);
     assert!(
@@ -473,23 +924,49 @@ Only called during genesis to initialize system resources for this module.
 
     let i = 0;
     while ({
-        spec {
-            invariant i <= num_secondary_signers;
-            invariant forall j in 0..i:
-                account::exists_at(secondary_signer_addresses[j])
-                    && secondary_signer_public_key_hashes[j]
-                    == account::get_authentication_key(secondary_signer_addresses[j]);
-        };
+        // spec {
+        //     invariant i <= num_secondary_signers;
+        //     invariant forall j in 0..i:
+        //         account::exists_at(secondary_signer_addresses[j]);
+        //     invariant forall j in 0..i:
+        //         secondary_signer_public_key_hashes[j] == account::get_authentication_key(secondary_signer_addresses[j]) ||
+        //             (features::spec_simulation_enhancement_enabled() && is_simulation && vector::is_empty(secondary_signer_public_key_hashes[j]));
+        //         account::account_resource_exists_at(secondary_signer_addresses[j])
+        //         && secondary_signer_public_key_hashes[j]
+        //             == account::get_authentication_key(secondary_signer_addresses[j])
+        //             || features::account_abstraction_enabled() && account_abstraction::using_native_authenticator(
+        //             secondary_signer_addresses[j]
+        //         ) && option::spec_some(secondary_signer_public_key_hashes[j]) == account_abstraction::native_authenticator(
+        //         account::exists_at(secondary_signer_addresses[j])
+        //         && secondary_signer_public_key_hashes[j]
+        //             == account::spec_get_authentication_key(secondary_signer_addresses[j])
+        //             || features::spec_account_abstraction_enabled() && account_abstraction::using_native_authenticator(
+        //             secondary_signer_addresses[j]
+        //         ) && option::spec_some(
+        //             secondary_signer_public_key_hashes[j]
+        //         ) == account_abstraction::spec_native_authenticator(
+        //             secondary_signer_addresses[j]
+        //         );
+        // };
         (i < num_secondary_signers)
     }) {
         let secondary_address = *vector::borrow(&secondary_signer_addresses, i);
         assert!(account::exists_at(secondary_address), error::invalid_argument(PROLOGUE_EACCOUNT_DOES_NOT_EXIST));
-
         let signer_public_key_hash = *vector::borrow(&secondary_signer_public_key_hashes, i);
-        assert!(
-            signer_public_key_hash == account::get_authentication_key(secondary_address),
-            error::invalid_argument(PROLOGUE_EINVALID_ACCOUNT_AUTH_KEY),
-        );
+        if (!skip_auth_key_check(is_simulation, &signer_public_key_hash)) {
+            if (option::is_some(&signer_public_key_hash)) {
+                assert!(
+                    signer_public_key_hash == option::some(account::get_authentication_key(secondary_address)),
+                    error::invalid_argument(PROLOGUE_EINVALID_ACCOUNT_AUTH_KEY)
+                );
+            } else {
+                assert!(
+                    allow_missing_txn_authentication_key(secondary_address),
+                    error::invalid_argument(PROLOGUE_EINVALID_ACCOUNT_AUTH_KEY)
+                )
+            };
+        };
+
         i = i + 1;
     }
 }
@@ -528,17 +1005,24 @@ Only called during genesis to initialize system resources for this module.
     chain_id: u8,
 ) {
     assert!(features::fee_payer_enabled(), error::invalid_state(PROLOGUE_EFEE_PAYER_NOT_ENABLED));
+    // prologue_common and multi_agent_common_prologue with is_simulation set to false behave identically to the
+    // original fee_payer_script_prologue function.
     prologue_common(
-        sender,
-        fee_payer_address,
-        txn_sequence_number,
-        txn_sender_public_key,
+        &sender,
+        &create_signer::create_signer(fee_payer_address),
+        ReplayProtector::SequenceNumber(txn_sequence_number),
+        option::some(txn_sender_public_key),
         txn_gas_price,
         txn_max_gas_units,
         txn_expiration_time,
         chain_id,
+        false,
+    );
+    multi_agent_common_prologue(
+        secondary_signer_addresses,
+        vector::map(secondary_signer_public_key_hashes, |x| option::some(x)),
+        false
     );
-    multi_agent_common_prologue(secondary_signer_addresses, secondary_signer_public_key_hashes);
     assert!(
         fee_payer_public_key_hash == account::get_authentication_key(fee_payer_address),
         error::invalid_argument(PROLOGUE_EINVALID_ACCOUNT_AUTH_KEY),
@@ -548,6 +1032,65 @@ Only called during genesis to initialize system resources for this module.
 
 
 
+
+ + + +## Function `fee_payer_script_prologue_extended` + + + +
fun fee_payer_script_prologue_extended(sender: signer, txn_sequence_number: u64, txn_sender_public_key: vector<u8>, secondary_signer_addresses: vector<address>, secondary_signer_public_key_hashes: vector<vector<u8>>, fee_payer_address: address, fee_payer_public_key_hash: vector<u8>, txn_gas_price: u64, txn_max_gas_units: u64, txn_expiration_time: u64, chain_id: u8, is_simulation: bool)
+
+ + + +
+Implementation + + +
fun fee_payer_script_prologue_extended(
+    sender: signer,
+    txn_sequence_number: u64,
+    txn_sender_public_key: vector<u8>,
+    secondary_signer_addresses: vector<address>,
+    secondary_signer_public_key_hashes: vector<vector<u8>>,
+    fee_payer_address: address,
+    fee_payer_public_key_hash: vector<u8>,
+    txn_gas_price: u64,
+    txn_max_gas_units: u64,
+    txn_expiration_time: u64,
+    chain_id: u8,
+    is_simulation: bool,
+) {
+    assert!(features::fee_payer_enabled(), error::invalid_state(PROLOGUE_EFEE_PAYER_NOT_ENABLED));
+    prologue_common(
+        &sender,
+        &create_signer::create_signer(fee_payer_address),
+        ReplayProtector::SequenceNumber(txn_sequence_number),
+        option::some(txn_sender_public_key),
+        txn_gas_price,
+        txn_max_gas_units,
+        txn_expiration_time,
+        chain_id,
+        is_simulation,
+    );
+    multi_agent_common_prologue(
+        secondary_signer_addresses,
+        vector::map(secondary_signer_public_key_hashes, |x| option::some(x)),
+        is_simulation
+    );
+    if (!skip_auth_key_check(is_simulation, &option::some(fee_payer_public_key_hash))) {
+            assert!(
+                fee_payer_public_key_hash == account::get_authentication_key(fee_payer_address),
+                error::invalid_argument(PROLOGUE_EINVALID_ACCOUNT_AUTH_KEY),
+            )
+    }
+}
+
+ + +
@@ -572,10 +1115,468 @@ Called by the Adapter storage_fee_refunded: u64, txn_gas_price: u64, txn_max_gas_units: u64, + gas_units_remaining: u64, +) { + let addr = signer::address_of(&account); + epilogue_gas_payer( + account, + addr, + storage_fee_refunded, + txn_gas_price, + txn_max_gas_units, + gas_units_remaining + ); +} +
+ + + + + + + +## Function `epilogue_extended` + + + +
fun epilogue_extended(account: signer, storage_fee_refunded: u64, txn_gas_price: u64, txn_max_gas_units: u64, gas_units_remaining: u64, is_simulation: bool)
+
+ + + +
+Implementation + + +
fun epilogue_extended(
+    account: signer,
+    storage_fee_refunded: u64,
+    txn_gas_price: u64,
+    txn_max_gas_units: u64,
+    gas_units_remaining: u64,
+    is_simulation: bool,
+) {
+    let addr = signer::address_of(&account);
+    epilogue_gas_payer_extended(
+        account,
+        addr,
+        storage_fee_refunded,
+        txn_gas_price,
+        txn_max_gas_units,
+        gas_units_remaining,
+        is_simulation
+    );
+}
+
+ + + +
+ + + +## Function `epilogue_gas_payer` + +Epilogue function with explicit gas payer specified, is run after a transaction is successfully executed. +Called by the Adapter + + +
fun epilogue_gas_payer(account: signer, gas_payer: address, storage_fee_refunded: u64, txn_gas_price: u64, txn_max_gas_units: u64, gas_units_remaining: u64)
+
+ + + +
+Implementation + + +
fun epilogue_gas_payer(
+    account: signer,
+    gas_payer: address,
+    storage_fee_refunded: u64,
+    txn_gas_price: u64,
+    txn_max_gas_units: u64,
     gas_units_remaining: u64
 ) {
+    // epilogue_gas_payer_extended with is_simulation set to false behaves identically to the original
+    // epilogue_gas_payer function.
+    epilogue_gas_payer_extended(
+        account,
+        gas_payer,
+        storage_fee_refunded,
+        txn_gas_price,
+        txn_max_gas_units,
+        gas_units_remaining,
+        false,
+    );
+}
+
+ + + +
+ + + +## Function `epilogue_gas_payer_extended` + + + +
fun epilogue_gas_payer_extended(account: signer, gas_payer: address, storage_fee_refunded: u64, txn_gas_price: u64, txn_max_gas_units: u64, gas_units_remaining: u64, is_simulation: bool)
+
+ + + +
+Implementation + + +
fun epilogue_gas_payer_extended(
+    account: signer,
+    gas_payer: address,
+    storage_fee_refunded: u64,
+    txn_gas_price: u64,
+    txn_max_gas_units: u64,
+    gas_units_remaining: u64,
+    is_simulation: bool,
+) {
+    assert!(txn_max_gas_units >= gas_units_remaining, error::invalid_argument(EOUT_OF_GAS));
+    let gas_used = txn_max_gas_units - gas_units_remaining;
+
+    assert!(
+        (txn_gas_price as u128) * (gas_used as u128) <= MAX_U64,
+        error::out_of_range(EOUT_OF_GAS)
+    );
+    let transaction_fee_amount = txn_gas_price * gas_used;
+
+    // it's important to maintain the error code consistent with vm
+    // to do failed transaction cleanup.
+    if (!skip_gas_payment(is_simulation, gas_payer)) {
+        if (features::operations_default_to_fa_apt_store_enabled()) {
+            assert!(
+                aptos_account::is_fungible_balance_at_least(gas_payer, transaction_fee_amount),
+                error::out_of_range(PROLOGUE_ECANT_PAY_GAS_DEPOSIT),
+            );
+        } else {
+            assert!(
+                coin::is_balance_at_least<AptosCoin>(gas_payer, transaction_fee_amount),
+                error::out_of_range(PROLOGUE_ECANT_PAY_GAS_DEPOSIT),
+            );
+        };
+
+        if (transaction_fee_amount > storage_fee_refunded) {
+            let burn_amount = transaction_fee_amount - storage_fee_refunded;
+            transaction_fee::burn_fee(gas_payer, burn_amount);
+        } else if (transaction_fee_amount < storage_fee_refunded) {
+            let mint_amount = storage_fee_refunded - transaction_fee_amount;
+            transaction_fee::mint_and_refund(gas_payer, mint_amount);
+        };
+    };
+
+    // Increment sequence number
     let addr = signer::address_of(&account);
-    epilogue_gas_payer(account, addr, storage_fee_refunded, txn_gas_price, txn_max_gas_units, gas_units_remaining);
+    account::increment_sequence_number(addr);
+}
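Worked example (illustrative numbers): with `txn_gas_price` = 100, `txn_max_gas_units` = 2_000 and `gas_units_remaining` = 1_500, `gas_used` = 500 and `transaction_fee_amount` = 50_000 octas. If `storage_fee_refunded` = 10_000, the gas payer is charged the 40_000 octa difference through `transaction_fee::burn_fee`; if `storage_fee_refunded` = 60_000 instead, 10_000 octas are refunded through `transaction_fee::mint_and_refund`. The sender's sequence number is incremented in both cases.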
+
+ + + +
+ + + +## Function `skip_auth_key_check` + + + +
fun skip_auth_key_check(is_simulation: bool, auth_key: &option::Option<vector<u8>>): bool
+
+ + + +
+Implementation + + +
inline fun skip_auth_key_check(is_simulation: bool, auth_key: &Option<vector<u8>>): bool {
+    is_simulation && (option::is_none(auth_key) || vector::is_empty(option::borrow(auth_key)))
+}
+
+ + + +
+ + + +## Function `skip_gas_payment` + + + +
fun skip_gas_payment(is_simulation: bool, gas_payer: address): bool
+
+ + + +
+Implementation + + +
inline fun skip_gas_payment(is_simulation: bool, gas_payer: address): bool {
+    is_simulation && gas_payer == @0x0
+}
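Taken together, these two helpers only fire for simulations: with `is_simulation` set to true, an absent or empty authentication key bypasses the auth-key checks in the prologues, and a gas payer of `@0x0` bypasses the balance check in the prologue as well as the fee burn/refund in the epilogue. For a regular transaction (`is_simulation` = false) both helpers return false and every check runs.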
+
+ + + +
+ + + +## Function `unified_prologue` + +new set of functions + + +
fun unified_prologue(sender: signer, txn_sender_public_key: option::Option<vector<u8>>, txn_sequence_number: u64, secondary_signer_addresses: vector<address>, secondary_signer_public_key_hashes: vector<option::Option<vector<u8>>>, txn_gas_price: u64, txn_max_gas_units: u64, txn_expiration_time: u64, chain_id: u8, is_simulation: bool)
+
+ + + +
+Implementation + + +
fun unified_prologue(
+    sender: signer,
+    // None means no need to check, i.e. either AA (where it is already checked) or simulation
+    txn_sender_public_key: Option<vector<u8>>,
+    txn_sequence_number: u64,
+    secondary_signer_addresses: vector<address>,
+    secondary_signer_public_key_hashes: vector<Option<vector<u8>>>,
+    txn_gas_price: u64,
+    txn_max_gas_units: u64,
+    txn_expiration_time: u64,
+    chain_id: u8,
+    is_simulation: bool,
+) {
+    unified_prologue_v2(
+        sender,
+        txn_sender_public_key,
+        ReplayProtector::SequenceNumber(txn_sequence_number),
+        secondary_signer_addresses,
+        secondary_signer_public_key_hashes,
+        txn_gas_price,
+        txn_max_gas_units,
+        txn_expiration_time,
+        chain_id,
+        is_simulation,
+
+    )
+}
+
+ + + +
+ + + +## Function `unified_prologue_fee_payer` + +If there is no fee_payer, fee_payer = sender + + +
fun unified_prologue_fee_payer(sender: signer, fee_payer: signer, txn_sender_public_key: option::Option<vector<u8>>, fee_payer_public_key_hash: option::Option<vector<u8>>, txn_sequence_number: u64, secondary_signer_addresses: vector<address>, secondary_signer_public_key_hashes: vector<option::Option<vector<u8>>>, txn_gas_price: u64, txn_max_gas_units: u64, txn_expiration_time: u64, chain_id: u8, is_simulation: bool)
+
+ + + +
+Implementation + + +
fun unified_prologue_fee_payer(
+    sender: signer,
+    fee_payer: signer,
+    // None means no need to check, i.e. either AA (where it is already checked) or simulation
+    txn_sender_public_key: Option<vector<u8>>,
+    // None means no need to check, i.e. either AA (where it is already checked) or simulation
+    fee_payer_public_key_hash: Option<vector<u8>>,
+    txn_sequence_number: u64,
+    secondary_signer_addresses: vector<address>,
+    secondary_signer_public_key_hashes: vector<Option<vector<u8>>>,
+    txn_gas_price: u64,
+    txn_max_gas_units: u64,
+    txn_expiration_time: u64,
+    chain_id: u8,
+    is_simulation: bool,
+) {
+    unified_prologue_fee_payer_v2(
+        sender,
+        fee_payer,
+        txn_sender_public_key,
+        fee_payer_public_key_hash,
+        ReplayProtector::SequenceNumber(txn_sequence_number),
+        secondary_signer_addresses,
+        secondary_signer_public_key_hashes,
+        txn_gas_price,
+        txn_max_gas_units,
+        txn_expiration_time,
+        chain_id,
+        is_simulation,
+    )
+}
+
+ + + +
+ + + +## Function `unified_epilogue` + + + +
fun unified_epilogue(account: signer, gas_payer: signer, storage_fee_refunded: u64, txn_gas_price: u64, txn_max_gas_units: u64, gas_units_remaining: u64, is_simulation: bool)
+
+ + + +
+Implementation + + +
fun unified_epilogue(
+    account: signer,
+    gas_payer: signer,
+    storage_fee_refunded: u64,
+    txn_gas_price: u64,
+    txn_max_gas_units: u64,
+    gas_units_remaining: u64,
+    is_simulation: bool,
+) {
+    unified_epilogue_v2(
+        account,
+        gas_payer,
+        storage_fee_refunded,
+        txn_gas_price,
+        txn_max_gas_units,
+        gas_units_remaining,
+        is_simulation,
+        false,
+    )
+}
+
+ + + +
+ + + +## Function `unified_prologue_v2` + +new set of functions to support txn payload v2 format and orderless transactions + + +
fun unified_prologue_v2(sender: signer, txn_sender_public_key: option::Option<vector<u8>>, replay_protector: transaction_validation::ReplayProtector, secondary_signer_addresses: vector<address>, secondary_signer_public_key_hashes: vector<option::Option<vector<u8>>>, txn_gas_price: u64, txn_max_gas_units: u64, txn_expiration_time: u64, chain_id: u8, is_simulation: bool)
+
+ + + +
+Implementation + + +
fun unified_prologue_v2(
+    sender: signer,
+    txn_sender_public_key: Option<vector<u8>>,
+    replay_protector: ReplayProtector,
+    secondary_signer_addresses: vector<address>,
+    secondary_signer_public_key_hashes: vector<Option<vector<u8>>>,
+    txn_gas_price: u64,
+    txn_max_gas_units: u64,
+    txn_expiration_time: u64,
+    chain_id: u8,
+    is_simulation: bool,
+) {
+    prologue_common(
+        &sender,
+        &sender,
+        replay_protector,
+        txn_sender_public_key,
+        txn_gas_price,
+        txn_max_gas_units,
+        txn_expiration_time,
+        chain_id,
+        is_simulation,
+    );
+    multi_agent_common_prologue(secondary_signer_addresses, secondary_signer_public_key_hashes, is_simulation);
+}
+
+ + + +
+ + + +## Function `unified_prologue_fee_payer_v2` + +If there is no fee_payer, fee_payer = sender + + +
fun unified_prologue_fee_payer_v2(sender: signer, fee_payer: signer, txn_sender_public_key: option::Option<vector<u8>>, fee_payer_public_key_hash: option::Option<vector<u8>>, replay_protector: transaction_validation::ReplayProtector, secondary_signer_addresses: vector<address>, secondary_signer_public_key_hashes: vector<option::Option<vector<u8>>>, txn_gas_price: u64, txn_max_gas_units: u64, txn_expiration_time: u64, chain_id: u8, is_simulation: bool)
+
+ + + +
+Implementation + + +
fun unified_prologue_fee_payer_v2(
+    sender: signer,
+    fee_payer: signer,
+    txn_sender_public_key: Option<vector<u8>>,
+    fee_payer_public_key_hash: Option<vector<u8>>,
+    replay_protector: ReplayProtector,
+    secondary_signer_addresses: vector<address>,
+    secondary_signer_public_key_hashes: vector<Option<vector<u8>>>,
+    txn_gas_price: u64,
+    txn_max_gas_units: u64,
+    txn_expiration_time: u64,
+    chain_id: u8,
+    is_simulation: bool,
+) {
+    prologue_common(
+        &sender,
+        &fee_payer,
+        replay_protector,
+        txn_sender_public_key,
+        txn_gas_price,
+        txn_max_gas_units,
+        txn_expiration_time,
+        chain_id,
+        is_simulation,
+    );
+    multi_agent_common_prologue(secondary_signer_addresses, secondary_signer_public_key_hashes, is_simulation);
+    if (!skip_auth_key_check(is_simulation, &fee_payer_public_key_hash)) {
+        let fee_payer_address = signer::address_of(&fee_payer);
+        if (option::is_some(&fee_payer_public_key_hash)) {
+            assert!(
+                fee_payer_public_key_hash == option::some(account::get_authentication_key(fee_payer_address)),
+                error::invalid_argument(PROLOGUE_EINVALID_ACCOUNT_AUTH_KEY)
+            );
+        } else {
+            assert!(
+                allow_missing_txn_authentication_key(fee_payer_address),
+                error::invalid_argument(PROLOGUE_EINVALID_ACCOUNT_AUTH_KEY)
+            )
+        };
+    }
 }
 
@@ -583,15 +1584,13 @@ Called by the Adapter
- + -## Function `epilogue_gas_payer` +## Function `unified_epilogue_v2` -Epilogue function with explicit gas payer specified, is run after a transaction is successfully executed. -Called by the Adapter -
fun epilogue_gas_payer(account: signer, gas_payer: address, storage_fee_refunded: u64, txn_gas_price: u64, txn_max_gas_units: u64, gas_units_remaining: u64)
+
fun unified_epilogue_v2(account: signer, gas_payer: signer, storage_fee_refunded: u64, txn_gas_price: u64, txn_max_gas_units: u64, gas_units_remaining: u64, is_simulation: bool, is_orderless_txn: bool)
 
@@ -600,13 +1599,15 @@ Called by the Adapter Implementation -
fun epilogue_gas_payer(
+
fun unified_epilogue_v2(
     account: signer,
-    gas_payer: address,
+    gas_payer: signer,
     storage_fee_refunded: u64,
     txn_gas_price: u64,
     txn_max_gas_units: u64,
-    gas_units_remaining: u64
+    gas_units_remaining: u64,
+    is_simulation: bool,
+    is_orderless_txn: bool,
 ) {
     assert!(txn_max_gas_units >= gas_units_remaining, error::invalid_argument(EOUT_OF_GAS));
     let gas_used = txn_max_gas_units - gas_units_remaining;
@@ -617,52 +1618,49 @@ Called by the Adapter
     );
     let transaction_fee_amount = txn_gas_price * gas_used;
 
+    let gas_payer_address = signer::address_of(&gas_payer);
     // it's important to maintain the error code consistent with vm
     // to do failed transaction cleanup.
-    if (features::operations_default_to_fa_apt_store_enabled()) {
-        assert!(
-            aptos_account::is_fungible_balance_at_least(gas_payer, transaction_fee_amount),
-            error::out_of_range(PROLOGUE_ECANT_PAY_GAS_DEPOSIT),
-        );
-    } else {
-        assert!(
-            coin::is_balance_at_least<AptosCoin>(gas_payer, transaction_fee_amount),
-            error::out_of_range(PROLOGUE_ECANT_PAY_GAS_DEPOSIT),
-        );
-    };
-
-    let amount_to_burn = if (features::collect_and_distribute_gas_fees()) {
-        // TODO(gas): We might want to distinguish the refundable part of the charge and burn it or track
-        // it separately, so that we don't increase the total supply by refunding.
-
-        // If transaction fees are redistributed to validators, collect them here for
-        // later redistribution.
-        transaction_fee::collect_fee(gas_payer, transaction_fee_amount);
-        0
-    } else {
-        // Otherwise, just burn the fee.
-        // TODO: this branch should be removed completely when transaction fee collection
-        // is tested and is fully proven to work well.
-        transaction_fee_amount
-    };
-
-    if (amount_to_burn > storage_fee_refunded) {
-        let burn_amount = amount_to_burn - storage_fee_refunded;
-        if (features::governed_gas_pool_enabled()) {
-            governed_gas_pool::deposit_gas_fee_v2(gas_payer, burn_amount);
+    if (!skip_gas_payment(
+        is_simulation,
+        gas_payer_address
+    )) {
+        if (features::operations_default_to_fa_apt_store_enabled()) {
+            assert!(
+                aptos_account::is_fungible_balance_at_least(gas_payer_address, transaction_fee_amount),
+                error::out_of_range(PROLOGUE_ECANT_PAY_GAS_DEPOSIT),
+            );
         } else {
-            transaction_fee::burn_fee(gas_payer, burn_amount);
-        }
-    } else if (amount_to_burn < storage_fee_refunded) {
-        let mint_amount = storage_fee_refunded - amount_to_burn;
-        if (!features::governed_gas_pool_enabled()) {
-            transaction_fee::mint_and_refund(gas_payer, mint_amount);
-        }
+            assert!(
+                coin::is_balance_at_least<AptosCoin>(gas_payer_address, transaction_fee_amount),
+                error::out_of_range(PROLOGUE_ECANT_PAY_GAS_DEPOSIT),
+            );
+        };
+
+        if (transaction_fee_amount > storage_fee_refunded) {
+            let burn_amount = transaction_fee_amount - storage_fee_refunded;
+            transaction_fee::burn_fee(gas_payer_address, burn_amount);
+            permissioned_signer::check_permission_consume(
+                &gas_payer,
+                (burn_amount as u256),
+                GasPermission {}
+            );
+        } else if (transaction_fee_amount < storage_fee_refunded) {
+            let mint_amount = storage_fee_refunded - transaction_fee_amount;
+            transaction_fee::mint_and_refund(gas_payer_address, mint_amount);
+            permissioned_signer::increase_limit(
+                &gas_payer,
+                (mint_amount as u256),
+                GasPermission {}
+            );
+        };
     };
 
-    // Increment sequence number
-    let addr = signer::address_of(&account);
-    account::increment_sequence_number(addr);
+    if (!is_orderless_txn) {
+        // Increment sequence number
+        let addr = signer::address_of(&account);
+        account::increment_sequence_number(addr);
+    }
 }
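Illustrative accounting for a permissioned gas payer: if `grant_gas_permission` earlier granted an allowance of 1_000_000 octas and this transaction burns 50_000 octas net of the storage refund, `check_permission_consume` reduces the remaining allowance to 950_000; conversely, when the storage refund exceeds the fee, the refunded amount is credited back to the allowance through `increase_limit`.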
 
@@ -726,6 +1724,38 @@ Called by the Adapter + + +### Function `grant_gas_permission` + + +
public fun grant_gas_permission(master: &signer, permissioned: &signer, gas_amount: u64)
+
+ + + + +
pragma aborts_if_is_partial;
+
+ + + + + +### Function `revoke_gas_permission` + + +
public fun revoke_gas_permission(permissioned: &signer)
+
+ + + + +
pragma aborts_if_is_partial;
+
+ + + ### Function `initialize` @@ -754,10 +1784,10 @@ Give some constraints that may abort according to the conditions.
schema PrologueCommonAbortsIf {
-    sender: signer;
-    gas_payer: address;
-    txn_sequence_number: u64;
-    txn_authentication_key: vector<u8>;
+    sender: &signer;
+    gas_payer: &signer;
+    replay_protector: ReplayProtector;
+    txn_authentication_key: Option<vector<u8>>;
     txn_gas_price: u64;
     txn_max_gas_units: u64;
     txn_expiration_time: u64;
@@ -767,28 +1797,7 @@ Give some constraints that may abort according to the conditions.
     aborts_if !exists<ChainId>(@aptos_framework);
     aborts_if !(chain_id::get() == chain_id);
     let transaction_sender = signer::address_of(sender);
-    aborts_if (
-        !features::spec_is_enabled(features::SPONSORED_AUTOMATIC_ACCOUNT_CREATION)
-            || account::exists_at(transaction_sender)
-            || transaction_sender == gas_payer
-            || txn_sequence_number > 0
-    ) && (
-        !(txn_sequence_number >= global<Account>(transaction_sender).sequence_number)
-            || !(txn_authentication_key == global<Account>(transaction_sender).authentication_key)
-            || !account::exists_at(transaction_sender)
-            || !(txn_sequence_number == global<Account>(transaction_sender).sequence_number)
-    );
-    aborts_if features::spec_is_enabled(features::SPONSORED_AUTOMATIC_ACCOUNT_CREATION)
-        && transaction_sender != gas_payer
-        && txn_sequence_number == 0
-        && !account::exists_at(transaction_sender)
-        && txn_authentication_key != bcs::to_bytes(transaction_sender);
-    aborts_if !(txn_sequence_number < (1u64 << 63));
-    let max_transaction_fee = txn_gas_price * txn_max_gas_units;
-    aborts_if max_transaction_fee > MAX_U64;
-    aborts_if !exists<CoinStore<AptosCoin>>(gas_payer);
-    // This enforces high-level requirement 1:
-    aborts_if !(global<CoinStore<AptosCoin>>(gas_payer).coin.value >= max_transaction_fee);
+    let gas_payer_addr = signer::address_of(gas_payer);
 }
 
@@ -799,7 +1808,7 @@ Give some constraints that may abort according to the conditions. ### Function `prologue_common` -
fun prologue_common(sender: signer, gas_payer: address, txn_sequence_number: u64, txn_authentication_key: vector<u8>, txn_gas_price: u64, txn_max_gas_units: u64, txn_expiration_time: u64, chain_id: u8)
+
fun prologue_common(sender: &signer, gas_payer: &signer, replay_protector: transaction_validation::ReplayProtector, txn_authentication_key: option::Option<vector<u8>>, txn_gas_price: u64, txn_max_gas_units: u64, txn_expiration_time: u64, chain_id: u8, is_simulation: bool)
 
@@ -811,6 +1820,38 @@ Give some constraints that may abort according to the conditions. + + +### Function `check_for_replay_protection_regular_txn` + + +
fun check_for_replay_protection_regular_txn(sender_address: address, gas_payer_address: address, txn_sequence_number: u64)
+
+ + + + +
pragma verify = false;
+
+ + + + + +### Function `check_for_replay_protection_orderless_txn` + + +
fun check_for_replay_protection_orderless_txn(sender: address, nonce: u64, txn_expiration_time: u64)
+
+ + + + +
pragma verify = false;
+
+ + + ### Function `script_prologue` @@ -823,10 +1864,6 @@ Give some constraints that may abort according to the conditions.
pragma verify = false;
-include PrologueCommonAbortsIf {
-    gas_payer: signer::address_of(sender),
-    txn_authentication_key: txn_public_key
-};
 
@@ -837,23 +1874,64 @@ Give some constraints that may abort according to the conditions.
schema MultiAgentPrologueCommonAbortsIf {
     secondary_signer_addresses: vector<address>;
-    secondary_signer_public_key_hashes: vector<vector<u8>>;
+    secondary_signer_public_key_hashes: vector<Option<vector<u8>>>;
+    is_simulation: bool;
     let num_secondary_signers = len(secondary_signer_addresses);
     aborts_if len(secondary_signer_public_key_hashes) != num_secondary_signers;
     // This enforces high-level requirement 2:
     aborts_if exists i in 0..num_secondary_signers:
-        !account::exists_at(secondary_signer_addresses[i])
-            || secondary_signer_public_key_hashes[i] !=
-            account::get_authentication_key(secondary_signer_addresses[i]);
+        !account::spec_exists_at(secondary_signer_addresses[i]);
+    aborts_if exists i in 0..num_secondary_signers:
+        !can_skip(features::spec_simulation_enhancement_enabled(), is_simulation, secondary_signer_public_key_hashes[i]) &&
+            option::spec_is_some(secondary_signer_public_key_hashes[i]) && option::spec_borrow(
+            secondary_signer_public_key_hashes[i]
+        ) !=
+                account::spec_get_authentication_key(secondary_signer_addresses[i]);
+    ensures forall i in 0..num_secondary_signers:
+        account::spec_exists_at(secondary_signer_addresses[i]);
     ensures forall i in 0..num_secondary_signers:
-        account::exists_at(secondary_signer_addresses[i])
-            && secondary_signer_public_key_hashes[i] ==
-            account::get_authentication_key(secondary_signer_addresses[i]);
+        option::spec_is_none(secondary_signer_public_key_hashes[i]) || option::spec_borrow(
+            secondary_signer_public_key_hashes[i]
+        ) ==
+            account::spec_get_authentication_key(secondary_signer_addresses[i])
+            || can_skip(features::spec_simulation_enhancement_enabled(), is_simulation, secondary_signer_public_key_hashes[i]);
+}
+
+ + + + + + + +
fun can_skip(feature_flag: bool, is_simulation: bool, auth_key: Option<vector<u8>>): bool {
+   features::spec_simulation_enhancement_enabled() && is_simulation && option::spec_is_none(auth_key)
 }
 
+ + +### Function `script_prologue_extended` + + +
fun script_prologue_extended(sender: signer, txn_sequence_number: u64, txn_public_key: vector<u8>, txn_gas_price: u64, txn_max_gas_units: u64, txn_expiration_time: u64, chain_id: u8, _script_hash: vector<u8>, is_simulation: bool)
+
+ + + + +
pragma verify = false;
+include PrologueCommonAbortsIf {
+    gas_payer: sender,
+    txn_authentication_key: option::spec_some(txn_public_key),
+    replay_protector: ReplayProtector::SequenceNumber(txn_sequence_number),
+};
+
+ + + ### Function `multi_agent_script_prologue` @@ -863,22 +1941,29 @@ Give some constraints that may abort according to the conditions.
+ + +
pragma verify = false;
+
+ + + + + +### Function `multi_agent_script_prologue_extended` + + +
fun multi_agent_script_prologue_extended(sender: signer, txn_sequence_number: u64, txn_sender_public_key: vector<u8>, secondary_signer_addresses: vector<address>, secondary_signer_public_key_hashes: vector<vector<u8>>, txn_gas_price: u64, txn_max_gas_units: u64, txn_expiration_time: u64, chain_id: u8, is_simulation: bool)
+
+ + Aborts if the length of the public key hash vector does not equal the number of signers.
pragma verify_duration_estimate = 120;
-let gas_payer = signer::address_of(sender);
+let gas_payer = sender;
 pragma verify = false;
-include PrologueCommonAbortsIf {
-    gas_payer,
-    txn_sequence_number,
-    txn_authentication_key: txn_sender_public_key,
-};
-include MultiAgentPrologueCommonAbortsIf {
-    secondary_signer_addresses,
-    secondary_signer_public_key_hashes,
-};
 
@@ -888,16 +1973,13 @@ not equal the number of singers. ### Function `multi_agent_common_prologue` -
fun multi_agent_common_prologue(secondary_signer_addresses: vector<address>, secondary_signer_public_key_hashes: vector<vector<u8>>)
+
fun multi_agent_common_prologue(secondary_signer_addresses: vector<address>, secondary_signer_public_key_hashes: vector<option::Option<vector<u8>>>, is_simulation: bool)
 
-
include MultiAgentPrologueCommonAbortsIf {
-    secondary_signer_addresses,
-    secondary_signer_public_key_hashes,
-};
+
pragma aborts_if_is_partial;
 
@@ -913,20 +1995,33 @@ not equal the number of singers. -
pragma verify_duration_estimate = 120;
+
pragma verify = false;
+
+ + + + + +### Function `fee_payer_script_prologue_extended` + + +
fun fee_payer_script_prologue_extended(sender: signer, txn_sequence_number: u64, txn_sender_public_key: vector<u8>, secondary_signer_addresses: vector<address>, secondary_signer_public_key_hashes: vector<vector<u8>>, fee_payer_address: address, fee_payer_public_key_hash: vector<u8>, txn_gas_price: u64, txn_max_gas_units: u64, txn_expiration_time: u64, chain_id: u8, is_simulation: bool)
+
+ + + + +
pragma aborts_if_is_partial;
+pragma verify_duration_estimate = 120;
 aborts_if !features::spec_is_enabled(features::FEE_PAYER_ENABLED);
-let gas_payer = fee_payer_address;
+let gas_payer = create_signer::create_signer(fee_payer_address);
 include PrologueCommonAbortsIf {
     gas_payer,
-    txn_sequence_number,
-    txn_authentication_key: txn_sender_public_key,
-};
-include MultiAgentPrologueCommonAbortsIf {
-    secondary_signer_addresses,
-    secondary_signer_public_key_hashes,
+    replay_protector: ReplayProtector::SequenceNumber(txn_sequence_number),
+    txn_authentication_key: option::spec_some(txn_sender_public_key),
 };
-aborts_if !account::exists_at(gas_payer);
-aborts_if !(fee_payer_public_key_hash == account::get_authentication_key(gas_payer));
+aborts_if !account::spec_exists_at(fee_payer_address);
+aborts_if !(fee_payer_public_key_hash == account::spec_get_authentication_key(fee_payer_address));
 aborts_if !features::spec_fee_payer_enabled();
 
@@ -941,6 +2036,22 @@ not equal the number of singers.
+ + +
pragma verify = false;
+
+ + + + + +### Function `epilogue_extended` + + +
fun epilogue_extended(account: signer, storage_fee_refunded: u64, txn_gas_price: u64, txn_max_gas_units: u64, gas_units_remaining: u64, is_simulation: bool)
+
+ + Abort according to the conditions. AptosCoinCapabilities and CoinInfo should exists. Skip transaction_fee::burn_fee verification. @@ -961,6 +2072,22 @@ Skip transaction_fee::burn_fee verification.
+ + +
pragma verify = false;
+
+ + + + + +### Function `epilogue_gas_payer_extended` + + +
fun epilogue_gas_payer_extended(account: signer, gas_payer: address, storage_fee_refunded: u64, txn_gas_price: u64, txn_max_gas_units: u64, gas_units_remaining: u64, is_simulation: bool)
+
+ + Abort according to the conditions. AptosCoinCapabilities and CoinInfo should exist. Skip transaction_fee::burn_fee verification. @@ -972,6 +2099,102 @@ Skip transaction_fee::burn_fee verification. + + +### Function `unified_prologue` + + +
fun unified_prologue(sender: signer, txn_sender_public_key: option::Option<vector<u8>>, txn_sequence_number: u64, secondary_signer_addresses: vector<address>, secondary_signer_public_key_hashes: vector<option::Option<vector<u8>>>, txn_gas_price: u64, txn_max_gas_units: u64, txn_expiration_time: u64, chain_id: u8, is_simulation: bool)
+
+ + + + +
pragma verify = false;
+
+ + + + + +### Function `unified_prologue_fee_payer` + + +
fun unified_prologue_fee_payer(sender: signer, fee_payer: signer, txn_sender_public_key: option::Option<vector<u8>>, fee_payer_public_key_hash: option::Option<vector<u8>>, txn_sequence_number: u64, secondary_signer_addresses: vector<address>, secondary_signer_public_key_hashes: vector<option::Option<vector<u8>>>, txn_gas_price: u64, txn_max_gas_units: u64, txn_expiration_time: u64, chain_id: u8, is_simulation: bool)
+
+ + + + +
pragma verify = false;
+
+ + + + + +### Function `unified_epilogue` + + +
fun unified_epilogue(account: signer, gas_payer: signer, storage_fee_refunded: u64, txn_gas_price: u64, txn_max_gas_units: u64, gas_units_remaining: u64, is_simulation: bool)
+
+ + + + +
pragma verify = false;
+
+ + + + + +### Function `unified_prologue_v2` + + +
fun unified_prologue_v2(sender: signer, txn_sender_public_key: option::Option<vector<u8>>, replay_protector: transaction_validation::ReplayProtector, secondary_signer_addresses: vector<address>, secondary_signer_public_key_hashes: vector<option::Option<vector<u8>>>, txn_gas_price: u64, txn_max_gas_units: u64, txn_expiration_time: u64, chain_id: u8, is_simulation: bool)
+
+ + + + +
pragma verify = false;
+
+ + + + + +### Function `unified_prologue_fee_payer_v2` + + +
fun unified_prologue_fee_payer_v2(sender: signer, fee_payer: signer, txn_sender_public_key: option::Option<vector<u8>>, fee_payer_public_key_hash: option::Option<vector<u8>>, replay_protector: transaction_validation::ReplayProtector, secondary_signer_addresses: vector<address>, secondary_signer_public_key_hashes: vector<option::Option<vector<u8>>>, txn_gas_price: u64, txn_max_gas_units: u64, txn_expiration_time: u64, chain_id: u8, is_simulation: bool)
+
+ + + + +
pragma verify = false;
+
+ + + + + +### Function `unified_epilogue_v2` + + +
fun unified_epilogue_v2(account: signer, gas_payer: signer, storage_fee_refunded: u64, txn_gas_price: u64, txn_max_gas_units: u64, gas_units_remaining: u64, is_simulation: bool, is_orderless_txn: bool)
+
+ + + + +
pragma verify = false;
+
+ + + @@ -983,36 +2206,18 @@ Skip transaction_fee::burn_fee verification. txn_gas_price: u64; txn_max_gas_units: u64; gas_units_remaining: u64; - requires exists<GovernedGasPool>(@aptos_framework); - requires exists<CoinStore<AptosCoin>>(governed_gas_pool_address()); aborts_if !(txn_max_gas_units >= gas_units_remaining); let gas_used = txn_max_gas_units - gas_units_remaining; aborts_if !(txn_gas_price * gas_used <= MAX_U64); let transaction_fee_amount = txn_gas_price * gas_used; let addr = signer::address_of(account); - let pre_governed_gas_pool_balance = global<coin::CoinStore<AptosCoin>>(governed_gas_pool_address()).coin.value; - let post governed_gas_pool_balance = global<coin::CoinStore<AptosCoin>>(governed_gas_pool_address()).coin.value; let pre_account = global<account::Account>(addr); let post account = global<account::Account>(addr); aborts_if !exists<CoinStore<AptosCoin>>(gas_payer); aborts_if !exists<Account>(addr); aborts_if !(global<Account>(addr).sequence_number < MAX_U64); - ensures governed_gas_pool_balance == pre_governed_gas_pool_balance + transaction_fee_amount; ensures account.sequence_number == pre_account.sequence_number + 1; - let governed_gas_pool_enabled = features::spec_is_enabled(features::GOVERNED_GAS_POOL); - let collect_fee_enabled = features::spec_is_enabled(features::COLLECT_AND_DISTRIBUTE_GAS_FEES); - let collected_fees = global<CollectedFeesPerBlock>(@aptos_framework).amount; - let aggr = collected_fees.value; - let aggr_val = aggregator::spec_aggregator_get_val(aggr); - let aggr_lim = aggregator::spec_get_limit(aggr); - // This enforces high-level requirement 3: - aborts_if collect_fee_enabled && !exists<CollectedFeesPerBlock>(@aptos_framework); - aborts_if collect_fee_enabled && transaction_fee_amount > 0 && aggr_val + transaction_fee_amount > aggr_lim; - let amount_to_burn = if (collect_fee_enabled) { - 0 - } else { - transaction_fee_amount - storage_fee_refunded - }; + let amount_to_burn = transaction_fee_amount - storage_fee_refunded; let apt_addr = type_info::type_of<AptosCoin>().account_address; let maybe_apt_supply = global<CoinInfo<AptosCoin>>(apt_addr).supply; let total_supply_enabled = option::spec_is_some(maybe_apt_supply); @@ -1025,11 +2230,7 @@ Skip transaction_fee::burn_fee verification. aborts_if amount_to_burn > 0 && !exists<CoinInfo<AptosCoin>>(apt_addr); aborts_if amount_to_burn > 0 && total_supply_enabled && apt_supply_value < amount_to_burn; ensures total_supply_enabled ==> apt_supply_value - amount_to_burn == post_apt_supply_value; - let amount_to_mint = if (collect_fee_enabled) { - storage_fee_refunded - } else { - storage_fee_refunded - transaction_fee_amount - }; + let amount_to_mint = storage_fee_refunded - transaction_fee_amount; let total_supply = coin::supply<AptosCoin>; let post post_total_supply = coin::supply<AptosCoin>; aborts_if amount_to_mint > 0 && !exists<CoinStore<AptosCoin>>(addr); diff --git a/aptos-move/framework/aptos-framework/doc/util.md b/aptos-move/framework/aptos-framework/doc/util.md index 58f3e29748c93..ca6b1f1264c0e 100644 --- a/aptos-move/framework/aptos-framework/doc/util.md +++ b/aptos-move/framework/aptos-framework/doc/util.md @@ -29,6 +29,8 @@ Note that this function does not put any constraint on T. If code u deserialized a linear value, its their responsibility that the data they deserialize is owned. +Function would abort if T has signer in it. +
public(friend) fun from_bytes<T>(bytes: vector<u8>): T
 
diff --git a/aptos-move/framework/aptos-framework/doc/version.md b/aptos-move/framework/aptos-framework/doc/version.md index c201f455c86f1..5ebdcb989128f 100644 --- a/aptos-move/framework/aptos-framework/doc/version.md +++ b/aptos-move/framework/aptos-framework/doc/version.md @@ -134,7 +134,7 @@ Publishes the Version config. system_addresses::assert_aptos_framework(aptos_framework); move_to(aptos_framework, Version { major: initial_version }); - // Give aptos framework account capability to call set version. This allows on chain governance to do it through + // Give aptos framework account capability to call set version. This allows on chain governance to do it through // control of the aptos framework account. move_to(aptos_framework, SetVersionCapability {}); } @@ -233,7 +233,7 @@ Only used in reconfigurations to apply the pending on_new_epoch(framework: &signer) acquires Version { system_addresses::assert_aptos_framework(framework); if (config_buffer::does_exist<Version>()) { - let new_value = config_buffer::extract<Version>(); + let new_value = config_buffer::extract_v2<Version>(); if (exists<Version>(@aptos_framework)) { *borrow_global_mut<Version>(@aptos_framework) = new_value; } else { @@ -358,11 +358,9 @@ Abort if resource already exists in @aptos_framwork when initializi
pragma verify_duration_estimate = 120;
-include transaction_fee::RequiresCollectedFeesPerValueLeqBlockAptosSupply;
 include staking_config::StakingRewardsConfigRequirement;
 requires chain_status::is_genesis();
 requires timestamp::spec_now_microseconds() >= reconfiguration::last_reconfiguration_time();
-requires exists<stake::ValidatorFees>(@aptos_framework);
 requires exists<CoinInfo<AptosCoin>>(@aptos_framework);
 aborts_if !exists<SetVersionCapability>(signer::address_of(account));
 aborts_if !exists<Version>(@aptos_framework);
diff --git a/aptos-move/framework/aptos-framework/doc/vesting.md b/aptos-move/framework/aptos-framework/doc/vesting.md
index d2b59512b637d..c7f49dd651e44 100644
--- a/aptos-move/framework/aptos-framework/doc/vesting.md
+++ b/aptos-move/framework/aptos-framework/doc/vesting.md
@@ -65,7 +65,10 @@ withdrawable, admin can call admin_withdraw to withdraw all funds to the vesting
 -  [Struct `DistributeEvent`](#0x1_vesting_DistributeEvent)
 -  [Struct `TerminateEvent`](#0x1_vesting_TerminateEvent)
 -  [Struct `AdminWithdrawEvent`](#0x1_vesting_AdminWithdrawEvent)
+-  [Struct `VestPermission`](#0x1_vesting_VestPermission)
 -  [Constants](#@Constants_0)
+-  [Function `check_vest_permission`](#0x1_vesting_check_vest_permission)
+-  [Function `grant_permission`](#0x1_vesting_grant_permission)
 -  [Function `stake_pool_address`](#0x1_vesting_stake_pool_address)
 -  [Function `vesting_start_secs`](#0x1_vesting_vesting_start_secs)
 -  [Function `period_duration_secs`](#0x1_vesting_period_duration_secs)
@@ -169,6 +172,7 @@ withdrawable, admin can call admin_withdraw to withdraw all funds to the vesting
 use 0x1::features;
 use 0x1::fixed_point32;
 use 0x1::math64;
+use 0x1::permissioned_signer;
 use 0x1::pool_u64;
 use 0x1::signer;
 use 0x1::simple_map;
@@ -1423,6 +1427,34 @@ withdrawable, admin can call admin_withdraw to withdraw all funds to the vesting
 
 
 
+
+
+
+
+## Struct `VestPermission`
+
+Permissions to mutate the vesting config for a given account.
+
+
+
struct VestPermission has copy, drop, store
+
+ + + +
+Fields + + +
+
+dummy_field: bool +
+
+ +
+
+ +
@@ -1470,6 +1502,16 @@ Shareholders list cannot be empty.
+
+
+Current permissioned signer cannot perform vesting operations.
+
+
+const ENO_VESTING_PERMISSION: u64 = 17;
+
+ + + Cannot terminate the vesting contract with pending active stake. Need to wait until next epoch. @@ -1640,6 +1682,59 @@ Vesting contract has been terminated and all funds have been released back to th + + +## Function `check_vest_permission` + +Permissions + + +
fun check_vest_permission(s: &signer)
+
+ + + +
+Implementation + + +
inline fun check_vest_permission(s: &signer) {
+    assert!(
+        permissioned_signer::check_permission_exists(s, VestPermission {}),
+        error::permission_denied(ENO_VESTING_PERMISSION),
+    );
+}
+
+ + + +
+ + + +## Function `grant_permission` + +Grant permission to perform vesting operations on behalf of the master signer. + + +
public fun grant_permission(master: &signer, permissioned_signer: &signer)
+
+ + + +
+Implementation + + +
public fun grant_permission(master: &signer, permissioned_signer: &signer) {
+    permissioned_signer::authorize_unlimited(master, permissioned_signer, VestPermission {})
+}
+
+ + + +
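// Illustrative sketch (assumption, not part of the generated docs): how a master
// account would use the new grant_permission entry point. Creation of the
// permissioned signer handle via the permissioned_signer module is omitted here.
fun delegate_vesting_ops(master: &signer, delegate: &signer) {
    // After this call, `delegate` carries VestPermission, so the inline
    // check_vest_permission() in the admin-gated vesting functions passes for it.
    aptos_framework::vesting::grant_permission(master, delegate);
}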
+ ## Function `stake_pool_address` @@ -2162,6 +2257,7 @@ Create a vesting contract with a given configurations. // Optional seed used when creating the staking contract account. contract_creation_seed: vector<u8>, ): address acquires AdminStore { + check_vest_permission(admin); assert!( !system_addresses::is_reserved_address(withdrawal_address), error::invalid_argument(EINVALID_WITHDRAWAL_ADDRESS), @@ -2223,19 +2319,20 @@ Create a vesting contract with a given configurations. commission_percentage, }, ); + } else { + emit_event( + &mut admin_store.create_events, + CreateVestingContractEvent { + operator, + voter, + withdrawal_address, + grant_amount, + vesting_contract_address: contract_address, + staking_pool_address: pool_address, + commission_percentage, + }, + ); }; - emit_event( - &mut admin_store.create_events, - CreateVestingContractEvent { - operator, - voter, - withdrawal_address, - grant_amount, - vesting_contract_address: contract_address, - staking_pool_address: pool_address, - commission_percentage, - }, - ); move_to(&contract_signer, VestingContract { state: VESTING_POOL_ACTIVE, @@ -2392,17 +2489,18 @@ Unlock any vested portion of the grant. amount: vested_amount, }, ); + } else { + emit_event( + &mut vesting_contract.vest_events, + VestEvent { + admin: vesting_contract.admin, + vesting_contract_address: contract_address, + staking_pool_address: vesting_contract.staking.pool_address, + period_vested: next_period_to_vest, + amount: vested_amount, + }, + ); }; - emit_event( - &mut vesting_contract.vest_events, - VestEvent { - admin: vesting_contract.admin, - vesting_contract_address: contract_address, - staking_pool_address: vesting_contract.staking.pool_address, - period_vested: next_period_to_vest, - amount: vested_amount, - }, - ); }
@@ -2496,15 +2594,16 @@ Distribute any withdrawable stake from the stake pool. amount: total_distribution_amount, }, ); + } else { + emit_event( + &mut vesting_contract.distribute_events, + DistributeEvent { + admin: vesting_contract.admin, + vesting_contract_address: contract_address, + amount: total_distribution_amount, + }, + ); }; - emit_event( - &mut vesting_contract.distribute_events, - DistributeEvent { - admin: vesting_contract.admin, - vesting_contract_address: contract_address, - amount: total_distribution_amount, - }, - ); }
@@ -2583,14 +2682,15 @@ Terminate the vesting contract and send all funds back to the withdrawal address vesting_contract_address: contract_address, }, ); + } else { + emit_event( + &mut vesting_contract.terminate_events, + TerminateEvent { + admin: vesting_contract.admin, + vesting_contract_address: contract_address, + }, + ); }; - emit_event( - &mut vesting_contract.terminate_events, - TerminateEvent { - admin: vesting_contract.admin, - vesting_contract_address: contract_address, - }, - ); }
@@ -2640,15 +2740,16 @@ has already been terminated. amount, }, ); + } else { + emit_event( + &mut vesting_contract.admin_withdraw_events, + AdminWithdrawEvent { + admin: vesting_contract.admin, + vesting_contract_address: contract_address, + amount, + }, + ); }; - emit_event( - &mut vesting_contract.admin_withdraw_events, - AdminWithdrawEvent { - admin: vesting_contract.admin, - vesting_contract_address: contract_address, - amount, - }, - ); }
@@ -2696,18 +2797,19 @@ has already been terminated. commission_percentage, }, ); + } else { + emit_event( + &mut vesting_contract.update_operator_events, + UpdateOperatorEvent { + admin: vesting_contract.admin, + vesting_contract_address: contract_address, + staking_pool_address: vesting_contract.staking.pool_address, + old_operator, + new_operator, + commission_percentage, + }, + ); }; - emit_event( - &mut vesting_contract.update_operator_events, - UpdateOperatorEvent { - admin: vesting_contract.admin, - vesting_contract_address: contract_address, - staking_pool_address: vesting_contract.staking.pool_address, - old_operator, - new_operator, - commission_percentage, - }, - ); }
@@ -2816,17 +2918,18 @@ has already been terminated. new_voter, }, ); - }; - emit_event( - &mut vesting_contract.update_voter_events, - UpdateVoterEvent { - admin: vesting_contract.admin, - vesting_contract_address: contract_address, - staking_pool_address: vesting_contract.staking.pool_address, - old_voter, - new_voter, - }, - ); + } else { + emit_event( + &mut vesting_contract.update_voter_events, + UpdateVoterEvent { + admin: vesting_contract.admin, + vesting_contract_address: contract_address, + staking_pool_address: vesting_contract.staking.pool_address, + old_voter, + new_voter, + }, + ); + } }
@@ -2867,16 +2970,17 @@ has already been terminated. new_lockup_expiration_secs: stake::get_lockup_secs(vesting_contract.staking.pool_address), }, ); + } else { + emit_event( + &mut vesting_contract.reset_lockup_events, + ResetLockupEvent { + admin: vesting_contract.admin, + vesting_contract_address: contract_address, + staking_pool_address: vesting_contract.staking.pool_address, + new_lockup_expiration_secs: stake::get_lockup_secs(vesting_contract.staking.pool_address), + }, + ); }; - emit_event( - &mut vesting_contract.reset_lockup_events, - ResetLockupEvent { - admin: vesting_contract.admin, - vesting_contract_address: contract_address, - staking_pool_address: vesting_contract.staking.pool_address, - new_lockup_expiration_secs: stake::get_lockup_secs(vesting_contract.staking.pool_address), - }, - ); }
@@ -2931,17 +3035,18 @@ has already been terminated. new_beneficiary, }, ); + } else { + emit_event( + &mut vesting_contract.set_beneficiary_events, + SetBeneficiaryEvent { + admin: vesting_contract.admin, + vesting_contract_address: contract_address, + shareholder, + old_beneficiary, + new_beneficiary, + }, + ); }; - emit_event( - &mut vesting_contract.set_beneficiary_events, - SetBeneficiaryEvent { - admin: vesting_contract.admin, - vesting_contract_address: contract_address, - shareholder, - old_beneficiary, - new_beneficiary, - }, - ); }
@@ -2971,6 +3076,7 @@ account. contract_address: address, shareholder: address, ) acquires VestingAccountManagement, VestingContract { + check_vest_permission(account); let vesting_contract = borrow_global_mut<VestingContract>(contract_address); let addr = signer::address_of(account); assert!( @@ -3011,7 +3117,7 @@ account. role: String, role_holder: address, ) acquires VestingAccountManagement, VestingContract { - let vesting_contract = borrow_global_mut<VestingContract>(contract_address); + let vesting_contract = borrow_global<VestingContract>(contract_address); verify_admin(admin, vesting_contract); if (!exists<VestingAccountManagement>(contract_address)) { @@ -3135,7 +3241,7 @@ staking_contract and stake modules.
public fun get_vesting_account_signer(admin: &signer, contract_address: address): signer acquires VestingContract {
-    let vesting_contract = borrow_global_mut<VestingContract>(contract_address);
+    let vesting_contract = borrow_global<VestingContract>(contract_address);
     verify_admin(admin, vesting_contract);
     get_vesting_account_signer_internal(vesting_contract)
 }
@@ -3190,6 +3296,7 @@ This address should be deterministic for the same admin and vesting contract cre
     admin: &signer,
     contract_creation_seed: vector<u8>,
 ): (signer, SignerCapability) acquires AdminStore {
+    check_vest_permission(admin);
     let admin_store = borrow_global_mut<AdminStore>(signer::address_of(admin));
     let seed = bcs::to_bytes(&signer::address_of(admin));
     vector::append(&mut seed, bcs::to_bytes(&admin_store.nonce));
@@ -3229,6 +3336,7 @@ This address should be deterministic for the same admin and vesting contract cre
 
 
 
fun verify_admin(admin: &signer, vesting_contract: &VestingContract) {
+    check_vest_permission(admin);
     assert!(signer::address_of(admin) == vesting_contract.admin, error::unauthenticated(ENOT_ADMIN));
 }
 
@@ -3483,7 +3591,7 @@ This address should be deterministic for the same admin and vesting contract cre
pragma verify = true;
-pragma aborts_if_is_strict;
+pragma aborts_if_is_partial;
 // This enforces high-level requirement 2:
 invariant forall a: address where exists<VestingContract>(a):
     global<VestingContract>(a).grant_pool.shareholders_limit <= MAXIMUM_SHAREHOLDERS;
@@ -3491,6 +3599,19 @@ This address should be deterministic for the same admin and vesting contract cre
 
 
 
+
+
+
+
+
schema AbortsIfPermissionedSigner {
+    s: signer;
+    let perm = VestPermission {};
+    aborts_if !permissioned_signer::spec_check_permission_exists(s, perm);
+}
+
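// Illustrative sketch (assumption): a schema like the one above is reused in
// function specs via `include`, binding its `s` field to the signer argument of
// the function being specified; the exact bindings used by this change may differ.
spec update_operator {
    include AbortsIfPermissionedSigner { s: admin };
}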
+ + + ### Function `stake_pool_address` @@ -3686,8 +3807,7 @@ This address should be deterministic for the same admin and vesting contract cre
schema TotalAccumulatedRewardsAbortsIf {
     vesting_contract_address: address;
-    requires staking_contract.commission_percentage >= 0 && staking_contract.commission_percentage <= 100;
-    include ActiveVestingContractAbortsIf<VestingContract>{contract_address: vesting_contract_address};
+    include ActiveVestingContractAbortsIf{contract_address: vesting_contract_address};
     let vesting_contract = global<VestingContract>(vesting_contract_address);
     let staker = vesting_contract_address;
     let operator = vesting_contract.staking.operator;
@@ -3761,7 +3881,7 @@ This address should be deterministic for the same admin and vesting contract cre
 
 
 
-include ActiveVestingContractAbortsIf<VestingContract>{contract_address: vesting_contract_address};
+include ActiveVestingContractAbortsIf{contract_address: vesting_contract_address};
 
@@ -3788,7 +3908,7 @@ This address should be deterministic for the same admin and vesting contract cre
pragma opaque;
-include ActiveVestingContractAbortsIf<VestingContract>{contract_address: vesting_contract_address};
+include ActiveVestingContractAbortsIf{contract_address: vesting_contract_address};
 ensures [abstract] result == spec_shareholder(vesting_contract_address, shareholder_or_beneficiary);
 
@@ -3892,7 +4012,6 @@ This address should be deterministic for the same admin and vesting contract cre
pragma verify = false;
 aborts_if len(contract_addresses) == 0;
-include PreconditionAbortsIf;
 
@@ -3927,7 +4046,6 @@ This address should be deterministic for the same admin and vesting contract cre
pragma verify = false;
 aborts_if len(contract_addresses) == 0;
-include PreconditionAbortsIf;
 
@@ -3944,7 +4062,7 @@ This address should be deterministic for the same admin and vesting contract cre
pragma verify = false;
-include ActiveVestingContractAbortsIf<VestingContract>;
+include ActiveVestingContractAbortsIf;
 let vesting_contract = global<VestingContract>(contract_address);
 include WithdrawStakeAbortsIf { vesting_contract };
 
@@ -3980,7 +4098,7 @@ This address should be deterministic for the same admin and vesting contract cre
pragma verify = false;
-include ActiveVestingContractAbortsIf<VestingContract>;
+include ActiveVestingContractAbortsIf;
 let vesting_contract = global<VestingContract>(contract_address);
 include WithdrawStakeAbortsIf { vesting_contract };
 
@@ -4126,8 +4244,7 @@ This address should be deterministic for the same admin and vesting contract cre
pragma verify_duration_estimate = 300;
 pragma aborts_if_is_partial;
-aborts_if !account::exists_at(new_beneficiary);
-aborts_if !coin::spec_is_account_registered<AptosCoin>(new_beneficiary);
+aborts_if !account::spec_exists_at(new_beneficiary);
 include VerifyAdminAbortsIf;
 let post vesting_contract = global<VestingContract>(contract_address);
 ensures simple_map::spec_contains_key(vesting_contract.beneficiaries,shareholder);
@@ -4241,7 +4358,8 @@ This address should be deterministic for the same admin and vesting contract cre
 
 
 
-include VerifyAdminAbortsIf;
+pragma verify_duration_estimate = 120;
+include VerifyAdminAbortsIf;
 
@@ -4295,7 +4413,7 @@ This address should be deterministic for the same admin and vesting contract cre aborts_if !exists<AdminStore>(admin_addr); aborts_if len(account::ZERO_AUTH_KEY) != 32; aborts_if admin_store.nonce + 1 > MAX_U64; -let ea = account::exists_at(resource_addr); +let ea = account::spec_exists_at(resource_addr); include if (ea) account::CreateResourceAccountAbortsIf else account::CreateAccountAbortsIf {addr: resource_addr}; let acc = global<account::Account>(resource_addr); let post post_acc = global<account::Account>(resource_addr); @@ -4321,7 +4439,9 @@ This address should be deterministic for the same admin and vesting contract cre -
// This enforces high-level requirement 9:
+pragma verify_duration_estimate = 120;
+aborts_if permissioned_signer::spec_is_permissioned_signer(admin);
+// This enforces high-level requirement 9:
 aborts_if signer::address_of(admin) != vesting_contract.admin;
 
@@ -4355,7 +4475,7 @@ This address should be deterministic for the same admin and vesting contract cre
-include ActiveVestingContractAbortsIf<VestingContract>;
+include ActiveVestingContractAbortsIf;
 
@@ -4502,6 +4622,7 @@ This address should be deterministic for the same admin and vesting contract cre
schema VerifyAdminAbortsIf {
     contract_address: address;
     admin: signer;
+    aborts_if permissioned_signer::spec_is_permissioned_signer(admin);
     aborts_if !exists<VestingContract>(contract_address);
     let vesting_contract = global<VestingContract>(contract_address);
     aborts_if signer::address_of(admin) != vesting_contract.admin;
@@ -4514,7 +4635,7 @@ This address should be deterministic for the same admin and vesting contract cre
 
 
 
-schema ActiveVestingContractAbortsIf<VestingContract> {
+schema ActiveVestingContractAbortsIf {
     contract_address: address;
     // This enforces high-level requirement 5:
     aborts_if !exists<VestingContract>(contract_address);
diff --git a/aptos-move/framework/aptos-framework/doc/voting.md b/aptos-move/framework/aptos-framework/doc/voting.md
index dfea6bb54d7ae..b99cbe2432af4 100644
--- a/aptos-move/framework/aptos-framework/doc/voting.md
+++ b/aptos-move/framework/aptos-framework/doc/voting.md
@@ -36,7 +36,10 @@ the resolution process.
 -  [Struct `CreateProposalEvent`](#0x1_voting_CreateProposalEvent)
 -  [Struct `RegisterForumEvent`](#0x1_voting_RegisterForumEvent)
 -  [Struct `VoteEvent`](#0x1_voting_VoteEvent)
+-  [Struct `VotePermission`](#0x1_voting_VotePermission)
 -  [Constants](#@Constants_0)
+-  [Function `check_vote_permission`](#0x1_voting_check_vote_permission)
+-  [Function `grant_permission`](#0x1_voting_grant_permission)
 -  [Function `register`](#0x1_voting_register)
 -  [Function `create_proposal`](#0x1_voting_create_proposal)
 -  [Function `create_proposal_v2`](#0x1_voting_create_proposal_v2)
@@ -98,6 +101,7 @@ the resolution process.
 use 0x1::features;
 use 0x1::from_bcs;
 use 0x1::option;
+use 0x1::permissioned_signer;
 use 0x1::signer;
 use 0x1::simple_map;
 use 0x1::string;
@@ -593,6 +597,33 @@ Extra metadata (e.g. description, code url) can be part of the ProposalType stru
 
 
 
+
+
+
+
+## Struct `VotePermission`
+
+
+
+
struct VotePermission has copy, drop, store
+
+ + + +
+Fields + + +
+
+dummy_field: bool +
+
+ +
+
+ +
@@ -631,6 +662,16 @@ Cannot vote if the specified multi-step proposal is in execution. + + +Cannot call is_multi_step_proposal_in_execution() on single-step proposals. + + +
const ENO_VOTE_PERMISSION: u64 = 13;
+
+ + + Proposal cannot be resolved more than once @@ -780,6 +821,59 @@ Key used to track the resolvable time in the proposal's metadata. + + +## Function `check_vote_permission` + +Permissions + + +
fun check_vote_permission(s: &signer)
+
+ + + +
+Implementation + + +
inline fun check_vote_permission(s: &signer) {
+    assert!(
+        permissioned_signer::check_permission_exists(s, VotePermission {}),
+        error::permission_denied(ENO_VOTE_PERMISSION),
+    );
+}
+
+ + + +
+ + + +## Function `grant_permission` + +Grant permission to vote on behalf of the master signer. + + +
public fun grant_permission(master: &signer, permissioned_signer: &signer)
+
+ + + +
+Implementation + + +
public fun grant_permission(master: &signer, permissioned_signer: &signer) {
+    permissioned_signer::authorize_unlimited(master, permissioned_signer, VotePermission {})
+}
+
+ + + +
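// Illustrative sketch (assumption): the voting-side counterpart, assuming the
// permissioned signer handle has already been obtained elsewhere.
fun delegate_voting(master: &signer, delegate: &signer) {
    // `delegate` now carries VotePermission and passes check_vote_permission()
    // in the voting entry points that call it (e.g. register).
    aptos_framework::voting::grant_permission(master, delegate);
}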
+ ## Function `register` @@ -796,6 +890,7 @@ Key used to track the resolvable time in the proposal's metadata.
public fun register<ProposalType: store>(account: &signer) {
+    check_vote_permission(account);
     let addr = signer::address_of(account);
     assert!(!exists<VotingForum<ProposalType>>(addr), error::already_exists(EVOTING_FORUM_ALREADY_REGISTERED));
 
@@ -817,14 +912,15 @@ Key used to track the resolvable time in the proposal's metadata.
                 proposal_type_info: type_info::type_of<ProposalType>(),
             },
         );
+    } else {
+        event::emit_event<RegisterForumEvent>(
+            &mut voting_forum.events.register_forum_events,
+            RegisterForumEvent {
+                hosting_account: addr,
+                proposal_type_info: type_info::type_of<ProposalType>(),
+            },
+        );
     };
-    event::emit_event<RegisterForumEvent>(
-        &mut voting_forum.events.register_forum_events,
-        RegisterForumEvent {
-            hosting_account: addr,
-            proposal_type_info: type_info::type_of<ProposalType>(),
-        },
-    );
 
     move_to(account, voting_forum);
 }
@@ -952,7 +1048,7 @@ resolve this proposal.
         simple_map::add(&mut metadata, is_multi_step_in_execution_key, to_bytes(&false));
         // If the proposal is a single-step proposal, we check if the metadata passed by the client has the IS_MULTI_STEP_PROPOSAL_IN_EXECUTION_KEY key.
         // If they have the key, we will remove it, because a single-step proposal that doesn't need this key.
-    } else if (simple_map::contains_key(&mut metadata, &is_multi_step_in_execution_key)) {
+    } else if (simple_map::contains_key(&metadata, &is_multi_step_in_execution_key)) {
         simple_map::remove(&mut metadata, &is_multi_step_in_execution_key);
     };
 
@@ -982,19 +1078,19 @@ resolve this proposal.
                 min_vote_threshold,
             },
         );
+    } else {
+        event::emit_event<CreateProposalEvent>(
+            &mut voting_forum.events.create_proposal_events,
+            CreateProposalEvent {
+                proposal_id,
+                early_resolution_vote_threshold,
+                execution_hash,
+                expiration_secs,
+                metadata,
+                min_vote_threshold,
+            },
+        );
     };
-    event::emit_event<CreateProposalEvent>(
-        &mut voting_forum.events.create_proposal_events,
-        CreateProposalEvent {
-            proposal_id,
-            early_resolution_vote_threshold,
-            execution_hash,
-            expiration_secs,
-            metadata,
-            min_vote_threshold,
-        },
-    );
-
     proposal_id
 }
 
@@ -1066,11 +1162,12 @@ This guarantees that voting eligibility and voting power are controlled by the r if (std::features::module_event_migration_enabled()) { event::emit(Vote { proposal_id, num_votes }); + } else { + event::emit_event<VoteEvent>( + &mut voting_forum.events.vote_events, + VoteEvent { proposal_id, num_votes }, + ); }; - event::emit_event<VoteEvent>( - &mut voting_forum.events.vote_events, - VoteEvent { proposal_id, num_votes }, - ); }
@@ -1174,16 +1271,17 @@ there are more yes votes than no. If either of these conditions is not met, this resolved_early, }, ); + } else { + event::emit_event<ResolveProposal>( + &mut voting_forum.events.resolve_proposal_events, + ResolveProposal { + proposal_id, + yes_votes: proposal.yes_votes, + no_votes: proposal.no_votes, + resolved_early, + }, + ); }; - event::emit_event<ResolveProposal>( - &mut voting_forum.events.resolve_proposal_events, - ResolveProposal { - proposal_id, - yes_votes: proposal.yes_votes, - no_votes: proposal.no_votes, - resolved_early, - }, - ); option::extract(&mut proposal.execution_content) } @@ -1283,17 +1381,17 @@ there are more yes votes than no. If either of these conditions is not met, this resolved_early, }, ); + } else { + event::emit_event( + &mut voting_forum.events.resolve_proposal_events, + ResolveProposal { + proposal_id, + yes_votes: proposal.yes_votes, + no_votes: proposal.no_votes, + resolved_early, + }, + ); }; - event::emit_event( - &mut voting_forum.events.resolve_proposal_events, - ResolveProposal { - proposal_id, - yes_votes: proposal.yes_votes, - no_votes: proposal.no_votes, - resolved_early, - }, - ); - }
@@ -1907,7 +2005,20 @@ Return true if the voting period of the given proposal has already ended.
pragma verify = true;
-pragma aborts_if_is_strict;
+pragma aborts_if_is_partial;
+
+ + + + + + + +
schema AbortsIfPermissionedSigner {
+    s: signer;
+    let perm = VotePermission {};
+    aborts_if !permissioned_signer::spec_check_permission_exists(s, perm);
+}
 
@@ -1925,10 +2036,6 @@ Return true if the voting period of the given proposal has already ended.
let addr = signer::address_of(account);
 aborts_if exists<VotingForum<ProposalType>>(addr);
-aborts_if !exists<account::Account>(addr);
-let register_account = global<account::Account>(addr);
-aborts_if register_account.guid_creation_num + 4 >= account::MAX_GUID_CREATION_NUM;
-aborts_if register_account.guid_creation_num + 4 > MAX_U64;
 aborts_if !type_info::spec_is_struct<ProposalType>();
 ensures exists<VotingForum<ProposalType>>(addr);
 
diff --git a/aptos-move/framework/aptos-framework/sources/account.move b/aptos-move/framework/aptos-framework/sources/account/account.move similarity index 64% rename from aptos-move/framework/aptos-framework/sources/account.move rename to aptos-move/framework/aptos-framework/sources/account/account.move index a94dafd45d93b..7ac3144d9f453 100644 --- a/aptos-move/framework/aptos-framework/sources/account.move +++ b/aptos-move/framework/aptos-framework/sources/account/account.move @@ -1,6 +1,7 @@ module aptos_framework::account { use std::bcs; use std::error; + use std::features; use std::hash; use std::option::{Self, Option}; use std::signer; @@ -9,10 +10,13 @@ module aptos_framework::account { use aptos_framework::create_signer::create_signer; use aptos_framework::event::{Self, EventHandle}; use aptos_framework::guid; + use aptos_framework::permissioned_signer; use aptos_framework::system_addresses; use aptos_std::ed25519; use aptos_std::from_bcs; use aptos_std::multi_ed25519; + use aptos_std::single_key; + use aptos_std::multi_key; use aptos_std::table::{Self, Table}; use aptos_std::type_info::{Self, TypeInfo}; @@ -22,7 +26,6 @@ module aptos_framework::account { friend aptos_framework::multisig_account; friend aptos_framework::resource_account; friend aptos_framework::transaction_validation; - friend aptos_framework::governed_gas_pool; #[event] struct KeyRotation has drop, store { @@ -31,6 +34,29 @@ module aptos_framework::account { new_authentication_key: vector, } + #[event] + struct KeyRotationToPublicKey has drop, store { + // The address of the account that is rotating its key + account: address, + // The bitmap of verified public keys. This indicates which public keys have been verified by the account owner. + // The bitmap is 4 bytes long, thus representing 32 bits. Each bit represents whether a public key has been verified. + // In the 32 bit representation, if a bit at index i (read left to right) is 1, then the public key at index i has + // been verified in the public key. + // + // For example: [0x10100000,0x00000000,0x00000000,0x00000000] marks the first and third public keys in the multi-key as verified. + // + // Note: In the case of a single key, only the first bit is used. + verified_public_key_bit_map: vector, + // The scheme of the public key. + public_key_scheme: u8, + // The byte representation of the public key. + public_key: vector, + // The old authentication key on the account + old_auth_key: vector, + // The new authentication key which is the hash of [public_key, public_key_scheme] + new_auth_key: vector, + } + /// Resource representing an account. struct Account has key, store { authentication_key: vector, @@ -51,6 +77,12 @@ module aptos_framework::account { type_info: TypeInfo, } + #[event] + struct CoinRegister has drop, store { + account: address, + type_info: TypeInfo, + } + struct CapabilityOffer has store { for: Option
} struct RotationCapability has drop, store { account: address } @@ -123,12 +155,17 @@ module aptos_framework::account { const ED25519_SCHEME: u8 = 0; /// Scheme identifier for MultiEd25519 signatures used to derive authentication keys for MultiEd25519 public keys. const MULTI_ED25519_SCHEME: u8 = 1; + /// Scheme identifier for single key public keys used to derive authentication keys for single key public keys. + const SINGLE_KEY_SCHEME: u8 = 2; + /// Scheme identifier for multi key public keys used to derive authentication keys for multi key public keys. + const MULTI_KEY_SCHEME: u8 = 3; /// Scheme identifier used when hashing an account's address together with a seed to derive the address (not the /// authentication key) of a resource account. This is an abuse of the notion of a scheme identifier which, for now, /// serves to domain separate hashes used to derive resource account addresses from hashes used to derive /// authentication keys. Without such separation, an adversary could create (and get a signer for) a resource account /// whose address matches an existing address of a MultiEd25519 wallet. const DERIVE_RESOURCE_ACCOUNT_SCHEME: u8 = 255; + /// Account already exists const EACCOUNT_ALREADY_EXISTS: u64 = 1; /// Account does not exist @@ -163,14 +200,24 @@ module aptos_framework::account { const EACCOUNT_ALREADY_USED: u64 = 16; /// Offerer address doesn't exist const EOFFERER_ADDRESS_DOES_NOT_EXIST: u64 = 17; - /// The specified rotation capablity offer does not exist at the specified offerer address + /// The specified rotation capability offer does not exist at the specified offerer address const ENO_SUCH_ROTATION_CAPABILITY_OFFER: u64 = 18; // The signer capability is not offered to any address const ENO_SIGNER_CAPABILITY_OFFERED: u64 = 19; // This account has exceeded the allocated GUIDs it can create. It should be impossible to reach this number for real applications. const EEXCEEDED_MAX_GUID_CREATION_NUM: u64 = 20; - // A required feature flag is not enabled. - const EFLAG_NOT_ENABLED: u64 = 21; + /// The new authentication key already has an entry in the `OriginatingAddress` table + const ENEW_AUTH_KEY_ALREADY_MAPPED: u64 = 21; + /// The current authentication key and the new authentication key are the same + const ENEW_AUTH_KEY_SAME_AS_CURRENT: u64 = 22; + /// Current permissioned signer cannot perform the privilaged operations. + const ENO_ACCOUNT_PERMISSION: u64 = 23; + /// Specified scheme is not recognized. Should be ED25519_SCHEME(0), MULTI_ED25519_SCHEME(1), SINGLE_KEY_SCHEME(2), or MULTI_KEY_SCHEME(3). + const EUNRECOGNIZED_SCHEME: u64 = 24; + /// The provided public key is not a single Keyless public key + const ENOT_A_KEYLESS_PUBLIC_KEY: u64 = 25; + /// The provided public key is not the original public key for the account + const ENOT_THE_ORIGINAL_PUBLIC_KEY: u64 = 26; /// Explicitly separate the GUID space between Object and Account to prevent accidental overlap. const MAX_GUID_CREATION_NUM: u64 = 0x4000000000000; @@ -179,6 +226,43 @@ module aptos_framework::account { /// Create signer for testing, independently of an Aptos-style `Account`. public fun create_signer_for_test(addr: address): signer { create_signer(addr) } + enum AccountPermission has copy, drop, store { + /// Permission to rotate a key. 
+ KeyRotation, + /// Permission to offer another address to act like your address + Offering, + } + + /// Permissions + /// + inline fun check_rotation_permission(s: &signer) { + assert!( + permissioned_signer::check_permission_exists(s, AccountPermission::KeyRotation {}), + error::permission_denied(ENO_ACCOUNT_PERMISSION), + ); + } + + inline fun check_offering_permission(s: &signer) { + assert!( + permissioned_signer::check_permission_exists(s, AccountPermission::Offering {}), + error::permission_denied(ENO_ACCOUNT_PERMISSION), + ); + } + + /// Grant permission to perform key rotations on behalf of the master signer. + /// + /// This is **extremely dangerous** and should be granted only when it's absolutely needed. + public fun grant_key_rotation_permission(master: &signer, permissioned_signer: &signer) { + permissioned_signer::authorize_unlimited(master, permissioned_signer, AccountPermission::KeyRotation {}) + } + + /// Grant permission to use offered address's signer on behalf of the master signer. + /// + /// This is **extremely dangerous** and should be granted only when it's absolutely needed. + public fun grant_key_offering_permission(master: &signer, permissioned_signer: &signer) { + permissioned_signer::authorize_unlimited(master, permissioned_signer, AccountPermission::Offering {}) + } + /// Only called during genesis to initialize system resources for this module. public(friend) fun initialize(aptos_framework: &signer) { system_addresses::assert_aptos_framework(aptos_framework); @@ -188,8 +272,12 @@ module aptos_framework::account { } public fun create_account_if_does_not_exist(account_address: address) { - if (!exists(account_address)) { - create_account(account_address); + if (!resource_exists_at(account_address)) { + assert!( + account_address != @vm_reserved && account_address != @aptos_framework && account_address != @aptos_token, + error::invalid_argument(ECANNOT_RESERVED_ADDRESS) + ); + create_account_unchecked(account_address); } } @@ -199,21 +287,24 @@ module aptos_framework::account { public(friend) fun create_account(new_address: address): signer { // there cannot be an Account resource under new_addr already. assert!(!exists(new_address), error::already_exists(EACCOUNT_ALREADY_EXISTS)); - // NOTE: @core_resources gets created via a `create_account` call, so we do not include it below. assert!( new_address != @vm_reserved && new_address != @aptos_framework && new_address != @aptos_token, error::invalid_argument(ECANNOT_RESERVED_ADDRESS) ); - - create_account_unchecked(new_address) + if (features::is_default_account_resource_enabled()) { + create_signer(new_address) + } else { + create_account_unchecked(new_address) + } } + fun create_account_unchecked(new_address: address): signer { let new_account = create_signer(new_address); let authentication_key = bcs::to_bytes(&new_address); assert!( - vector::length(&authentication_key) == 32, + authentication_key.length() == 32, error::invalid_argument(EMALFORMED_AUTHENTICATION_KEY) ); @@ -242,22 +333,83 @@ module aptos_framework::account { } #[view] + /// Returns whether an account exists at `addr`. 
+ /// + /// When the `default_account_resource` feature flag is enabled: + /// - Always returns true, indicating that any address can be treated as a valid account + /// - This reflects a change in the account model where accounts are now considered to exist implicitly + /// - The sequence number and other account properties will return default values (0) for addresses without an Account resource + /// + /// When the feature flag is disabled: + /// - Returns true only if an Account resource exists at `addr` + /// - This is the legacy behavior where accounts must be explicitly created public fun exists_at(addr: address): bool { + features::is_default_account_resource_enabled() || exists(addr) + } + + /// Returns whether an Account resource exists at `addr`. + /// + /// Unlike `exists_at`, this function strictly checks for the presence of the Account resource, + /// regardless of the `default_account_resource` feature flag. + /// + /// This is useful for operations that specifically need to know if the Account resource + /// has been created, rather than just whether the address can be treated as an account. + inline fun resource_exists_at(addr: address): bool { exists(addr) } #[view] + /// Returns the next GUID creation number for `addr`. + /// + /// When the `default_account_resource` feature flag is enabled: + /// - Returns 0 for addresses without an Account resource + /// - This allows GUID creation for previously non-existent accounts + /// - The first GUID created will start the sequence from 0 + /// + /// When the feature flag is disabled: + /// - Aborts if no Account resource exists at `addr` public fun get_guid_next_creation_num(addr: address): u64 acquires Account { - borrow_global(addr).guid_creation_num + if (resource_exists_at(addr)) { + Account[addr].guid_creation_num + } else if (features::is_default_account_resource_enabled()) { + 0 + } else { + abort error::not_found(EACCOUNT_DOES_NOT_EXIST) + } } #[view] public fun get_sequence_number(addr: address): u64 acquires Account { - borrow_global(addr).sequence_number + if (resource_exists_at(addr)) { + Account[addr].sequence_number + } else if (features::is_default_account_resource_enabled()) { + 0 + } else { + abort error::not_found(EACCOUNT_DOES_NOT_EXIST) + } + } + + #[view] + public fun originating_address(auth_key: address): Option
acquires OriginatingAddress { + let address_map_ref = &OriginatingAddress[@aptos_framework].address_map; + if (address_map_ref.contains(auth_key)) { + option::some(*address_map_ref.borrow(auth_key)) + } else { + option::none() + } + } + + inline fun ensure_resource_exists(addr: address) acquires Account{ + if (features::is_default_account_resource_enabled()) { + create_account_if_does_not_exist(addr); + } else { + assert!(exists_at(addr), error::not_found(EACCOUNT_DOES_NOT_EXIST)); + } } public(friend) fun increment_sequence_number(addr: address) acquires Account { - let sequence_number = &mut borrow_global_mut(addr).sequence_number; + ensure_resource_exists(addr); + let sequence_number = &mut Account[addr].sequence_number; assert!( (*sequence_number as u128) < MAX_U64, @@ -269,7 +421,13 @@ module aptos_framework::account { #[view] public fun get_authentication_key(addr: address): vector acquires Account { - borrow_global(addr).authentication_key + if (resource_exists_at(addr)) { + Account[addr].authentication_key + } else if (features::is_default_account_resource_enabled()) { + bcs::to_bytes(&addr) + } else { + abort error::not_found(EACCOUNT_DOES_NOT_EXIST) + } } /// This function is used to rotate a resource account's authentication key to `new_auth_key`. This is done in @@ -279,12 +437,13 @@ module aptos_framework::account { /// 3. During multisig_v2 account creation public(friend) fun rotate_authentication_key_internal(account: &signer, new_auth_key: vector) acquires Account { let addr = signer::address_of(account); - assert!(exists_at(addr), error::not_found(EACCOUNT_DOES_NOT_EXIST)); + ensure_resource_exists(addr); assert!( - vector::length(&new_auth_key) == 32, + new_auth_key.length() == 32, error::invalid_argument(EMALFORMED_AUTHENTICATION_KEY) ); - let account_resource = borrow_global_mut(addr); + check_rotation_permission(account); + let account_resource = &mut Account[addr]; account_resource.authentication_key = new_auth_key; } @@ -293,10 +452,125 @@ module aptos_framework::account { /// does not come with a proof-of-knowledge of the underlying SK. Nonetheless, we need this functionality due to /// the introduction of non-standard key algorithms, such as passkeys, which cannot produce proofs-of-knowledge in /// the format expected in `rotate_authentication_key`. + /// + /// If you'd like to followup with updating the `OriginatingAddress` table, you can call + /// `set_originating_address()`. entry fun rotate_authentication_key_call(account: &signer, new_auth_key: vector) acquires Account { rotate_authentication_key_internal(account, new_auth_key); } + /// Private entry function for key rotation that allows the signer to update their authentication key from a given public key. + /// This function will abort if the scheme is not recognized or if new_public_key_bytes is not a valid public key for the given scheme. + /// + /// Note: This function does not update the `OriginatingAddress` table. 
+ entry fun rotate_authentication_key_from_public_key(account: &signer, scheme: u8, new_public_key_bytes: vector) acquires Account { + let addr = signer::address_of(account); + let account_resource = &Account[addr]; + let old_auth_key = account_resource.authentication_key; + let new_auth_key; + if (scheme == ED25519_SCHEME) { + let from_pk = ed25519::new_unvalidated_public_key_from_bytes(new_public_key_bytes); + new_auth_key = ed25519::unvalidated_public_key_to_authentication_key(&from_pk); + } else if (scheme == MULTI_ED25519_SCHEME) { + let from_pk = multi_ed25519::new_unvalidated_public_key_from_bytes(new_public_key_bytes); + new_auth_key = multi_ed25519::unvalidated_public_key_to_authentication_key(&from_pk); + } else if (scheme == SINGLE_KEY_SCHEME) { + new_auth_key = single_key::new_public_key_from_bytes(new_public_key_bytes).to_authentication_key(); + } else if (scheme == MULTI_KEY_SCHEME) { + new_auth_key = multi_key::new_public_key_from_bytes(new_public_key_bytes).to_authentication_key(); + } else { + abort error::invalid_argument(EUNRECOGNIZED_SCHEME) + }; + rotate_authentication_key_call(account, new_auth_key); + event::emit(KeyRotationToPublicKey { + account: addr, + // Set verified_public_key_bit_map to [0x00, 0x00, 0x00, 0x00] as the public key(s) are not verified + verified_public_key_bit_map: vector[0x00, 0x00, 0x00, 0x00], + public_key_scheme: scheme, + public_key: new_public_key_bytes, + old_auth_key, + new_auth_key, + }); + } + + /// Upserts an ED25519 backup key to an account that has a keyless public key as its original public key by converting the account's authentication key + /// to a multi-key of the original keyless public key and the new backup key that requires 1 signature from either key to authenticate. + /// This function takes a the account's original keyless public key and a ED25519 backup public key and rotates the account's authentication key to a multi-key of + /// the original keyless public key and the new backup key that requires 1 signature from either key to authenticate. + /// + /// Note: This function emits a `KeyRotationToMultiPublicKey` event marking both keys as verified since the keyless public key + /// is the original public key of the account and the new backup key has been validated via verifying the challenge signed by the new backup key. 
+ /// + /// # Arguments + /// * `account` - The signer representing the keyless account + /// * `keyless_public_key` - The original keyless public key of the account (wrapped in an AnyPublicKey) + /// * `backup_public_key` - The ED25519 public key to add as a backup + /// * `backup_key_proof` - A signature from the backup key proving ownership + /// + /// # Aborts + /// * If the any of inputs deserialize incorrectly + /// * If the provided public key is not a keyless public key + /// * If the keyless public key is not the original public key of the account + /// * If the backup key proof signature is invalid + /// + /// # Events + /// * Emits a `KeyRotationToMultiPublicKey` event with the new multi-key configuration + entry fun upsert_ed25519_backup_key_on_keyless_account(account: &signer, keyless_public_key: vector, backup_public_key: vector, backup_key_proof: vector) acquires Account { + // Check that the provided public key is a keyless public key + let keyless_single_key = single_key::new_public_key_from_bytes(keyless_public_key); + assert!(single_key::is_keyless_or_federated_keyless_public_key(&keyless_single_key), error::invalid_argument(ENOT_A_KEYLESS_PUBLIC_KEY)); + + let addr = signer::address_of(account); + let account_resource = &mut Account[addr]; + let old_auth_key = account_resource.authentication_key; + + // Check that the provided public key is original public key of the account by comparing + // its authentication key to the account address. + assert!( + bcs::to_bytes(&addr) == keyless_single_key.to_authentication_key(), + error::invalid_argument(ENOT_THE_ORIGINAL_PUBLIC_KEY) + ); + + let curr_auth_key_as_address = from_bcs::to_address(old_auth_key); + let challenge = RotationProofChallenge { + sequence_number: account_resource.sequence_number, + originator: addr, + current_auth_key: curr_auth_key_as_address, + new_public_key: backup_public_key, + }; + + // Assert the challenges signed by the provided backup key is valid + assert_valid_rotation_proof_signature_and_get_auth_key( + ED25519_SCHEME, + backup_public_key, + backup_key_proof, + &challenge + ); + + // Get the backup key as a single key + let backup_key_ed25519 = ed25519::new_unvalidated_public_key_from_bytes(backup_public_key); + let backup_key_as_single_key = single_key::from_ed25519_public_key_unvalidated(backup_key_ed25519); + + let new_public_key = multi_key::new_multi_key_from_single_keys(vector[keyless_single_key, backup_key_as_single_key], 1); + let new_auth_key = new_public_key.to_authentication_key(); + + // Rotate the authentication key to the new multi key public key + rotate_authentication_key_call(account, new_auth_key); + + event::emit(KeyRotationToPublicKey { + account: addr, + // This marks that both the keyless public key and the new backup key are verified + // The keyless public key is the original public key of the account and the new backup key + // has been validated via verifying the challenge signed by the new backup key. + // Represents the bitmap 0b11000000000000000000000000000000 + verified_public_key_bit_map: vector[0xC0, 0x00, 0x00, 0x00], + public_key_scheme: MULTI_KEY_SCHEME, + public_key: bcs::to_bytes(&new_public_key), + old_auth_key, + new_auth_key, + }); + } + /// Generic authentication key rotation function that allows the user to rotate their authentication key from any scheme to any scheme. 
/// To authorize the rotation, we need two signatures: /// - the first signature `cap_rotate_key` refers to the signature by the account owner's current key on a valid `RotationProofChallenge`, @@ -312,7 +586,7 @@ module aptos_framework::account { /// Here is an example attack if we don't ask for the second signature `cap_update_table`: /// Alice has rotated her account `addr_a` to `new_addr_a`. As a result, the following entry is created, to help Alice when recovering her wallet: /// `OriginatingAddress[new_addr_a]` -> `addr_a` - /// Alice has had bad day: her laptop blew up and she needs to reset her account on a new one. + /// Alice has had a bad day: her laptop blew up and she needs to reset her account on a new one. /// (Fortunately, she still has her secret key `new_sk_a` associated with her new address `new_addr_a`, so she can do this.) /// /// But Bob likes to mess with Alice. @@ -335,9 +609,10 @@ module aptos_framework::account { cap_update_table: vector, ) acquires Account, OriginatingAddress { let addr = signer::address_of(account); - assert!(exists_at(addr), error::not_found(EACCOUNT_DOES_NOT_EXIST)); - let account_resource = borrow_global_mut(addr); - + ensure_resource_exists(addr); + check_rotation_permission(account); + let account_resource = &mut Account[addr]; + let old_auth_key = account_resource.authentication_key; // Verify the given `from_public_key_bytes` matches this account's current authentication key. if (from_scheme == ED25519_SCHEME) { let from_pk = ed25519::new_unvalidated_public_key_from_bytes(from_public_key_bytes); @@ -382,6 +657,25 @@ module aptos_framework::account { // Update the `OriginatingAddress` table. update_auth_key_and_originating_address_table(addr, account_resource, new_auth_key); + + let verified_public_key_bit_map; + if (to_scheme == ED25519_SCHEME) { + // Set verified_public_key_bit_map to [0x80, 0x00, 0x00, 0x00] as the public key is verified and there is only one public key. + verified_public_key_bit_map = vector[0x80, 0x00, 0x00, 0x00]; + } else { + // The new key is a multi-ed25519 key, so set the verified_public_key_bit_map to the signature bitmap. + let len = vector::length(&cap_update_table); + verified_public_key_bit_map = vector::slice(&cap_update_table, len - 4, len); + }; + + event::emit(KeyRotationToPublicKey { + account: addr, + verified_public_key_bit_map, + public_key_scheme: to_scheme, + public_key: to_public_key_bytes, + old_auth_key, + new_auth_key, + }); } public entry fun rotate_authentication_key_with_rotation_capability( @@ -391,13 +685,15 @@ module aptos_framework::account { new_public_key_bytes: vector, cap_update_table: vector ) acquires Account, OriginatingAddress { - assert!(exists_at(rotation_cap_offerer_address), error::not_found(EOFFERER_ADDRESS_DOES_NOT_EXIST)); + check_rotation_permission(delegate_signer); + assert!(resource_exists_at(rotation_cap_offerer_address), error::not_found(EOFFERER_ADDRESS_DOES_NOT_EXIST)); // Check that there exists a rotation capability offer at the offerer's account resource for the delegate. 
let delegate_address = signer::address_of(delegate_signer); - let offerer_account_resource = borrow_global(rotation_cap_offerer_address); + let offerer_account_resource = &Account[rotation_cap_offerer_address]; + let old_auth_key = offerer_account_resource.authentication_key; assert!( - option::contains(&offerer_account_resource.rotation_capability_offer.for, &delegate_address), + offerer_account_resource.rotation_capability_offer.for.contains(&delegate_address), error::not_found(ENO_SUCH_ROTATION_CAPABILITY_OFFER) ); @@ -418,12 +714,31 @@ module aptos_framework::account { ); // Update the `OriginatingAddress` table, so we can find the originating address using the new address. - let offerer_account_resource = borrow_global_mut(rotation_cap_offerer_address); + let offerer_account_resource = &mut Account[rotation_cap_offerer_address]; update_auth_key_and_originating_address_table( rotation_cap_offerer_address, offerer_account_resource, new_auth_key ); + + let verified_public_key_bit_map; + if (new_scheme == ED25519_SCHEME) { + // Set verified_public_key_bit_map to [0x80, 0x00, 0x00, 0x00] as the public key is verified and there is only one public key. + verified_public_key_bit_map = vector[0x80, 0x00, 0x00, 0x00]; + } else { + // The new key is a multi-ed25519 key, so set the verified_public_key_bit_map to the signature bitmap. + let len = vector::length(&cap_update_table); + verified_public_key_bit_map = vector::slice(&cap_update_table, len - 4, len); + }; + + event::emit(KeyRotationToPublicKey { + account: rotation_cap_offerer_address, + verified_public_key_bit_map, + public_key_scheme: new_scheme, + public_key: new_public_key_bytes, + old_auth_key, + new_auth_key, + }); } /// Offers rotation capability on behalf of `account` to the account at address `recipient_address`. @@ -450,11 +765,13 @@ module aptos_framework::account { account_public_key_bytes: vector, recipient_address: address, ) acquires Account { + check_rotation_permission(account); let addr = signer::address_of(account); + ensure_resource_exists(addr); assert!(exists_at(recipient_address), error::not_found(EACCOUNT_DOES_NOT_EXIST)); // proof that this account intends to delegate its rotation capability to another account - let account_resource = borrow_global_mut(addr); + let account_resource = &mut Account[addr]; let proof_challenge = RotationCapabilityOfferProofChallengeV2 { chain_id: chain_id::get(), sequence_number: account_resource.sequence_number, @@ -494,34 +811,74 @@ module aptos_framework::account { }; // update the existing rotation capability offer or put in a new rotation capability offer for the current account - option::swap_or_fill(&mut account_resource.rotation_capability_offer.for, recipient_address); + account_resource.rotation_capability_offer.for.swap_or_fill(recipient_address); + } + + /// For the given account, add an entry to `OriginatingAddress` table mapping the account's + /// authentication key to the account's address. + /// + /// Can be used as a followup to `rotate_authentication_key_call()` to reconcile the + /// `OriginatingAddress` table, or to establish a mapping for a new account that has not yet had + /// its authentication key rotated. + /// + /// Aborts if there is already an entry in the `OriginatingAddress` table for the account's + /// authentication key. 
+ /// + /// Kept as a private entry function to ensure that after an unproven rotation via + /// `rotate_authentication_key_call()`, the `OriginatingAddress` table is only updated under the + /// authority of the new authentication key. + entry fun set_originating_address(account: &signer) acquires Account, OriginatingAddress { + let account_addr = signer::address_of(account); + assert!(exists(account_addr), error::not_found(EACCOUNT_DOES_NOT_EXIST)); + let auth_key_as_address = + from_bcs::to_address(Account[account_addr].authentication_key); + let address_map_ref_mut = + &mut OriginatingAddress[@aptos_framework].address_map; + if (address_map_ref_mut.contains(auth_key_as_address)) { + assert!( + *address_map_ref_mut.borrow(auth_key_as_address) == account_addr, + error::invalid_argument(ENEW_AUTH_KEY_ALREADY_MAPPED) + ); + } else { + address_map_ref_mut.add(auth_key_as_address, account_addr); + }; } #[view] /// Returns true if the account at `account_addr` has a rotation capability offer. public fun is_rotation_capability_offered(account_addr: address): bool acquires Account { - let account_resource = borrow_global(account_addr); - option::is_some(&account_resource.rotation_capability_offer.for) + if (features::is_default_account_resource_enabled()) { + if (!resource_exists_at(account_addr)) { + return false; + } + } else { + assert!(exists_at(account_addr), error::not_found(EACCOUNT_DOES_NOT_EXIST)); + }; + let account_resource = &Account[account_addr]; + account_resource.rotation_capability_offer.for.is_some() } #[view] /// Returns the address of the account that has a rotation capability offer from the account at `account_addr`. public fun get_rotation_capability_offer_for(account_addr: address): address acquires Account { - let account_resource = borrow_global(account_addr); + assert_account_resource_with_error(account_addr, ENO_SUCH_ROTATION_CAPABILITY_OFFER); + let account_resource = &Account[account_addr]; assert!( - option::is_some(&account_resource.rotation_capability_offer.for), + account_resource.rotation_capability_offer.for.is_some(), error::not_found(ENO_SIGNER_CAPABILITY_OFFERED), ); - *option::borrow(&account_resource.rotation_capability_offer.for) + *account_resource.rotation_capability_offer.for.borrow() } /// Revoke the rotation capability offer given to `to_be_revoked_recipient_address` from `account` public entry fun revoke_rotation_capability(account: &signer, to_be_revoked_address: address) acquires Account { assert!(exists_at(to_be_revoked_address), error::not_found(EACCOUNT_DOES_NOT_EXIST)); + check_rotation_permission(account); let addr = signer::address_of(account); - let account_resource = borrow_global_mut(addr); + assert_account_resource_with_error(addr, ENO_SUCH_ROTATION_CAPABILITY_OFFER); + let account_resource = &Account[addr]; assert!( - option::contains(&account_resource.rotation_capability_offer.for, &to_be_revoked_address), + account_resource.rotation_capability_offer.for.contains(&to_be_revoked_address), error::not_found(ENO_SUCH_ROTATION_CAPABILITY_OFFER) ); revoke_any_rotation_capability(account); @@ -529,8 +886,11 @@ module aptos_framework::account { /// Revoke any rotation capability offer in the specified account. 
public entry fun revoke_any_rotation_capability(account: &signer) acquires Account { - let account_resource = borrow_global_mut(signer::address_of(account)); - option::extract(&mut account_resource.rotation_capability_offer.for); + check_rotation_permission(account); + let offerer_addr = signer::address_of(account); + assert_account_resource_with_error(offerer_addr, ENO_SUCH_ROTATION_CAPABILITY_OFFER); + let account_resource = &mut Account[signer::address_of(account)]; + account_resource.rotation_capability_offer.for.extract(); } /// Offers signer capability on behalf of `account` to the account at address `recipient_address`. @@ -549,7 +909,9 @@ module aptos_framework::account { account_public_key_bytes: vector, recipient_address: address ) acquires Account { + check_offering_permission(account); let source_address = signer::address_of(account); + ensure_resource_exists(source_address); assert!(exists_at(recipient_address), error::not_found(EACCOUNT_DOES_NOT_EXIST)); // Proof that this account intends to delegate its signer capability to another account. @@ -562,36 +924,46 @@ module aptos_framework::account { source_address, account_scheme, account_public_key_bytes, signer_capability_sig_bytes, proof_challenge); // Update the existing signer capability offer or put in a new signer capability offer for the recipient. - let account_resource = borrow_global_mut(source_address); - option::swap_or_fill(&mut account_resource.signer_capability_offer.for, recipient_address); + let account_resource = &mut Account[source_address]; + account_resource.signer_capability_offer.for.swap_or_fill(recipient_address); } #[view] /// Returns true if the account at `account_addr` has a signer capability offer. public fun is_signer_capability_offered(account_addr: address): bool acquires Account { - let account_resource = borrow_global(account_addr); - option::is_some(&account_resource.signer_capability_offer.for) + if (features::is_default_account_resource_enabled()) { + if (!resource_exists_at(account_addr)) { + return false; + } + } else { + assert!(exists_at(account_addr), error::not_found(EACCOUNT_DOES_NOT_EXIST)); + }; + let account_resource = &Account[account_addr]; + account_resource.signer_capability_offer.for.is_some() } #[view] /// Returns the address of the account that has a signer capability offer from the account at `account_addr`. public fun get_signer_capability_offer_for(account_addr: address): address acquires Account { - let account_resource = borrow_global(account_addr); + assert_account_resource_with_error(account_addr, ENO_SIGNER_CAPABILITY_OFFERED); + let account_resource = &Account[account_addr]; assert!( - option::is_some(&account_resource.signer_capability_offer.for), + account_resource.signer_capability_offer.for.is_some(), error::not_found(ENO_SIGNER_CAPABILITY_OFFERED), ); - *option::borrow(&account_resource.signer_capability_offer.for) + *account_resource.signer_capability_offer.for.borrow() } /// Revoke the account owner's signer capability offer for `to_be_revoked_address` (i.e., the address that /// has a signer capability offer from `account` but will be revoked in this function). 
public entry fun revoke_signer_capability(account: &signer, to_be_revoked_address: address) acquires Account { + check_offering_permission(account); assert!(exists_at(to_be_revoked_address), error::not_found(EACCOUNT_DOES_NOT_EXIST)); let addr = signer::address_of(account); - let account_resource = borrow_global_mut(addr); + assert_account_resource_with_error(addr, ENO_SUCH_SIGNER_CAPABILITY); + let account_resource = &Account[addr]; assert!( - option::contains(&account_resource.signer_capability_offer.for, &to_be_revoked_address), + account_resource.signer_capability_offer.for.contains(&to_be_revoked_address), error::not_found(ENO_SUCH_SIGNER_CAPABILITY) ); revoke_any_signer_capability(account); @@ -599,26 +971,40 @@ module aptos_framework::account { /// Revoke any signer capability offer in the specified account. public entry fun revoke_any_signer_capability(account: &signer) acquires Account { - let account_resource = borrow_global_mut(signer::address_of(account)); - option::extract(&mut account_resource.signer_capability_offer.for); + check_offering_permission(account); + let offerer_addr = signer::address_of(account); + assert_account_resource_with_error(offerer_addr, ENO_SUCH_SIGNER_CAPABILITY); + let account_resource = &mut Account[signer::address_of(account)]; + account_resource.signer_capability_offer.for.extract(); } /// Return an authorized signer of the offerer, if there's an existing signer capability offer for `account` /// at the offerer's address. public fun create_authorized_signer(account: &signer, offerer_address: address): signer acquires Account { - assert!(exists_at(offerer_address), error::not_found(EOFFERER_ADDRESS_DOES_NOT_EXIST)); - + check_offering_permission(account); + assert_account_resource_with_error(offerer_address, ENO_SUCH_SIGNER_CAPABILITY); // Check if there's an existing signer capability offer from the offerer. - let account_resource = borrow_global(offerer_address); + let account_resource = &Account[offerer_address]; let addr = signer::address_of(account); assert!( - option::contains(&account_resource.signer_capability_offer.for, &addr), + account_resource.signer_capability_offer.for.contains(&addr), error::not_found(ENO_SUCH_SIGNER_CAPABILITY) ); create_signer(offerer_address) } + inline fun assert_account_resource_with_error(account: address, error_code: u64) { + if (features::is_default_account_resource_enabled()) { + assert!( + resource_exists_at(account), + error::not_found(error_code), + ); + } else { + assert!(exists_at(account), error::not_found(EACCOUNT_DOES_NOT_EXIST)); + }; + } + /////////////////////////////////////////////////////////////////////////// /// Helper functions for authentication key rotation. /////////////////////////////////////////////////////////////////////////// @@ -656,13 +1042,18 @@ module aptos_framework::account { account_resource: &mut Account, new_auth_key_vector: vector, ) acquires OriginatingAddress { - let address_map = &mut borrow_global_mut(@aptos_framework).address_map; + let address_map = &mut OriginatingAddress[@aptos_framework].address_map; let curr_auth_key = from_bcs::to_address(account_resource.authentication_key); + let new_auth_key = from_bcs::to_address(new_auth_key_vector); + assert!( + new_auth_key != curr_auth_key, + error::invalid_argument(ENEW_AUTH_KEY_SAME_AS_CURRENT) + ); // Checks `OriginatingAddress[curr_auth_key]` is either unmapped, or mapped to `originating_address`. // If it's mapped to the originating address, removes that mapping. 
// Otherwise, abort if it's mapped to a different address. - if (table::contains(address_map, curr_auth_key)) { + if (address_map.contains(curr_auth_key)) { // If account_a with address_a is rotating its keypair from keypair_a to keypair_b, we expect // the address of the account to stay the same, while its keypair updates to keypair_b. // Here, by asserting that we're calling from the account with the originating address, we enforce @@ -673,14 +1064,17 @@ module aptos_framework::account { // If the account with address b calls this function with two valid signatures, it will abort at this step, // because address b is not the account's originating address. assert!( - originating_addr == table::remove(address_map, curr_auth_key), + originating_addr == address_map.remove(curr_auth_key), error::not_found(EINVALID_ORIGINATING_ADDRESS) ); }; // Set `OriginatingAddress[new_auth_key] = originating_address`. - let new_auth_key = from_bcs::to_address(new_auth_key_vector); - table::add(address_map, new_auth_key, originating_addr); + assert!( + !address_map.contains(new_auth_key), + error::invalid_argument(ENEW_AUTH_KEY_ALREADY_MAPPED) + ); + address_map.add(new_auth_key, originating_addr); if (std::features::module_event_migration_enabled()) { event::emit(KeyRotation { @@ -688,14 +1082,15 @@ module aptos_framework::account { old_authentication_key: account_resource.authentication_key, new_authentication_key: new_auth_key_vector, }); + } else { + event::emit_event( + &mut account_resource.key_rotation_events, + KeyRotationEvent { + old_authentication_key: account_resource.authentication_key, + new_authentication_key: new_auth_key_vector, + } + ); }; - event::emit_event( - &mut account_resource.key_rotation_events, - KeyRotationEvent { - old_authentication_key: account_resource.authentication_key, - new_authentication_key: new_auth_key_vector, - } - ); // Update the account resource's authentication key. account_resource.authentication_key = new_auth_key_vector; @@ -709,8 +1104,8 @@ module aptos_framework::account { /// involves the use of a cryptographic hash operation and should be use thoughtfully. public fun create_resource_address(source: &address, seed: vector): address { let bytes = bcs::to_bytes(source); - vector::append(&mut bytes, seed); - vector::push_back(&mut bytes, DERIVE_RESOURCE_ACCOUNT_SCHEME); + bytes.append(seed); + bytes.push_back(DERIVE_RESOURCE_ACCOUNT_SCHEME); from_bcs::to_address(hash::sha3_256(bytes)) } @@ -726,13 +1121,15 @@ module aptos_framework::account { public fun create_resource_account(source: &signer, seed: vector): (signer, SignerCapability) acquires Account { let resource_addr = create_resource_address(&signer::address_of(source), seed); let resource = if (exists_at(resource_addr)) { - let account = borrow_global(resource_addr); + if (resource_exists_at(resource_addr)) { + let account = &Account[resource_addr]; assert!( - option::is_none(&account.signer_capability_offer.for), + account.signer_capability_offer.for.is_none(), error::already_exists(ERESOURCE_ACCCOUNT_EXISTS), ); + }; assert!( - account.sequence_number == 0, + get_sequence_number(resource_addr) == 0, error::invalid_state(EACCOUNT_ALREADY_USED), ); create_signer(resource_addr) @@ -745,7 +1142,7 @@ module aptos_framework::account { // of the resource account using the SignerCapability. 
rotate_authentication_key_internal(&resource, ZERO_AUTH_KEY); - let account = borrow_global_mut(resource_addr); + let account = &mut Account[resource_addr]; account.signer_capability_offer.for = option::some(resource_addr); let signer_cap = SignerCapability { account: resource_addr }; (resource, signer_cap) @@ -775,9 +1172,20 @@ module aptos_framework::account { /// GUID management methods. /////////////////////////////////////////////////////////////////////////// + /// Creates a new GUID for `account_signer` and increments the GUID creation number. + /// + /// When the `default_account_resource` feature flag is enabled: + /// - If no Account resource exists, one will be created automatically + /// - This ensures consistent GUID creation behavior for all addresses + /// + /// When the feature flag is disabled: + /// - Aborts if no Account resource exists + /// + /// Aborts if the maximum number of GUIDs has been reached (0x4000000000000) public fun create_guid(account_signer: &signer): guid::GUID acquires Account { let addr = signer::address_of(account_signer); - let account = borrow_global_mut(addr); + ensure_resource_exists(addr); + let account = &mut Account[addr]; let guid = guid::create(addr, &mut account.guid_creation_num); assert!( account.guid_creation_num < MAX_GUID_CREATION_NUM, @@ -786,10 +1194,10 @@ module aptos_framework::account { guid } - /////////////////////////////////////////////////////////////////////////// - /// GUID management methods. - /////////////////////////////////////////////////////////////////////////// - + /// Creates a new event handle for `account`. + /// + /// This is a wrapper around `create_guid` that creates an EventHandle, + /// inheriting the same behavior regarding account existence and feature flags. public fun new_event_handle(account: &signer): EventHandle acquires Account { event::new_event_handle(create_guid(account)) } @@ -799,13 +1207,23 @@ module aptos_framework::account { /////////////////////////////////////////////////////////////////////////// public(friend) fun register_coin(account_addr: address) acquires Account { - let account = borrow_global_mut(account_addr); - event::emit_event( - &mut account.coin_register_events, - CoinRegisterEvent { - type_info: type_info::type_of(), - }, - ); + if (std::features::module_event_migration_enabled()) { + event::emit( + CoinRegister { + account: account_addr, + type_info: type_info::type_of(), + }, + ); + } else { + ensure_resource_exists(account_addr); + let account = &mut Account[account_addr]; + event::emit_event( + &mut account.coin_register_events, + CoinRegisterEvent { + type_info: type_info::type_of(), + }, + ); + } } /////////////////////////////////////////////////////////////////////////// @@ -818,7 +1236,7 @@ module aptos_framework::account { recipient_address: address, ): SignerCapabilityOfferProofChallengeV2 acquires Account { SignerCapabilityOfferProofChallengeV2 { - sequence_number: borrow_global_mut(source_address).sequence_number, + sequence_number: get_sequence_number(source_address), source_address, recipient_address, } @@ -844,13 +1262,13 @@ module aptos_framework::account { signed_message_bytes: vector, message: T, ) acquires Account { - let account_resource = borrow_global_mut(account); + let auth_key = get_authentication_key(account); // Verify that the `SignerCapabilityOfferProofChallengeV2` has the right information and is signed by the account owner's key if (account_scheme == ED25519_SCHEME) { let pubkey = 
ed25519::new_unvalidated_public_key_from_bytes(account_public_key); let expected_auth_key = ed25519::unvalidated_public_key_to_authentication_key(&pubkey); assert!( - account_resource.authentication_key == expected_auth_key, + auth_key == expected_auth_key, error::invalid_argument(EWRONG_CURRENT_PUBLIC_KEY), ); @@ -863,7 +1281,7 @@ module aptos_framework::account { let pubkey = multi_ed25519::new_unvalidated_public_key_from_bytes(account_public_key); let expected_auth_key = multi_ed25519::unvalidated_public_key_to_authentication_key(&pubkey); assert!( - account_resource.authentication_key == expected_auth_key, + auth_key == expected_auth_key, error::invalid_argument(EWRONG_CURRENT_PUBLIC_KEY), ); @@ -880,7 +1298,7 @@ module aptos_framework::account { #[test_only] public fun create_account_for_test(new_address: address): signer { // Make this easier by just allowing the account to be created again in a test - if (!exists_at(new_address)) { + if (!resource_exists_at(new_address)) { create_account_unchecked(new_address) } else { create_signer_for_test(new_address) @@ -915,13 +1333,13 @@ module aptos_framework::account { let recipient_address = signer::address_of(&eve); let seed = eve_pk_bytes; // multisig public key - vector::push_back(&mut seed, 1); // multisig threshold - vector::push_back(&mut seed, 1); // signature scheme id + seed.push_back(1); // multisig threshold + seed.push_back(1); // signature scheme id let (resource, _) = create_resource_account(&alice, seed); let resource_addr = signer::address_of(&resource); let proof_challenge = SignerCapabilityOfferProofChallengeV2 { - sequence_number: borrow_global_mut(resource_addr).sequence_number, + sequence_number: get_sequence_number(resource_addr), source_address: resource_addr, recipient_address, }; @@ -930,15 +1348,15 @@ module aptos_framework::account { // Construct a malicious 1-out-of-2 multisig PK over Alice's authentication key and Eve's Ed25519 PK. let account_public_key_bytes = alice_auth; - vector::append(&mut account_public_key_bytes, eve_pk_bytes); - vector::push_back(&mut account_public_key_bytes, 1); // Multisig verification threshold. + account_public_key_bytes.append(eve_pk_bytes); + account_public_key_bytes.push_back(1); // Multisig verification threshold. let fake_pk = multi_ed25519::new_unvalidated_public_key_from_bytes(account_public_key_bytes); // Construct a multisig for `proof_challenge` as if it is signed by the signers behind `fake_pk`, // Eve being the only participant. let signer_capability_sig_bytes = x""; - vector::append(&mut signer_capability_sig_bytes, ed25519::signature_to_bytes(&eve_sig)); - vector::append(&mut signer_capability_sig_bytes, x"40000000"); // Signers bitmap. + signer_capability_sig_bytes.append(ed25519::signature_to_bytes(&eve_sig)); + signer_capability_sig_bytes.append(x"40000000"); // Signers bitmap. 
let fake_sig = multi_ed25519::new_signature_from_bytes(signer_capability_sig_bytes); assert!( @@ -958,7 +1376,7 @@ module aptos_framework::account { struct DummyResource has key {} #[test(user = @0x1)] - public entry fun test_module_capability(user: signer) acquires Account, DummyResource { + public entry fun test_module_capability(user: signer) acquires Account { let (resource_account, signer_cap) = create_resource_account(&user, x"01"); assert!(signer::address_of(&resource_account) != signer::address_of(&user), 0); @@ -966,7 +1384,7 @@ module aptos_framework::account { assert!(&resource_account == &resource_account_from_cap, 1); move_to(&resource_account_from_cap, DummyResource {}); - borrow_global(signer::address_of(&resource_account)); + assert!(exists(signer::address_of(&resource_account))); } #[test(user = @0x1)] @@ -993,7 +1411,7 @@ module aptos_framework::account { public fun increment_sequence_number_for_test( addr: address, ) acquires Account { - let acct = borrow_global_mut(addr); + let acct = &mut Account[addr]; acct.sequence_number = acct.sequence_number + 1; } @@ -1003,7 +1421,7 @@ module aptos_framework::account { addr: address, s: u64 ) acquires Account { - borrow_global_mut(addr).sequence_number = s; + Account[addr].sequence_number = s; } #[test_only] @@ -1013,14 +1431,16 @@ module aptos_framework::account { #[test_only] public fun set_signer_capability_offer(offerer: address, receiver: address) acquires Account { - let account_resource = borrow_global_mut(offerer); - option::swap_or_fill(&mut account_resource.signer_capability_offer.for, receiver); + ensure_resource_exists(offerer); + let account_resource = &mut Account[offerer]; + account_resource.signer_capability_offer.for.swap_or_fill(receiver); } #[test_only] public fun set_rotation_capability_offer(offerer: address, receiver: address) acquires Account { - let account_resource = borrow_global_mut(offerer); - option::swap_or_fill(&mut account_resource.rotation_capability_offer.for, receiver); + ensure_resource_exists(offerer); + let account_resource = &mut Account[offerer]; + account_resource.rotation_capability_offer.for.swap_or_fill(receiver); } #[test] @@ -1028,15 +1448,15 @@ module aptos_framework::account { public entry fun mock_sequence_numbers() acquires Account { let addr: address = @0x1234; // Define test address - create_account(addr); // Initialize account resource + create_account_unchecked(addr); // Initialize account resource // Assert sequence number intializes to 0 - assert!(borrow_global(addr).sequence_number == 0, 0); + assert!(Account[addr].sequence_number == 0, 0); increment_sequence_number_for_test(addr); // Increment sequence number // Assert correct mock value post-increment - assert!(borrow_global(addr).sequence_number == 1, 1); + assert!(Account[addr].sequence_number == 1, 1); set_sequence_number(addr, 10); // Set mock sequence number // Assert correct mock value post-modification - assert!(borrow_global(addr).sequence_number == 10, 2); + assert!(Account[addr].sequence_number == 10, 2); } /////////////////////////////////////////////////////////////////////////// @@ -1047,7 +1467,7 @@ module aptos_framework::account { #[expected_failure(abort_code = 65537, location = aptos_framework::ed25519)] public entry fun test_empty_public_key(alice: signer) acquires Account, OriginatingAddress { create_account(signer::address_of(&alice)); - let pk = vector::empty(); + let pk = vector[]; let sig = 
x"00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"; rotate_authentication_key(&alice, ED25519_SCHEME, pk, ED25519_SCHEME, pk, sig, sig); } @@ -1056,7 +1476,7 @@ module aptos_framework::account { #[expected_failure(abort_code = 262151, location = Self)] public entry fun test_empty_signature(alice: signer) acquires Account, OriginatingAddress { create_account(signer::address_of(&alice)); - let test_signature = vector::empty(); + let test_signature = vector[]; let pk = x"0000000000000000000000000000000000000000000000000000000000000000"; rotate_authentication_key(&alice, ED25519_SCHEME, pk, ED25519_SCHEME, pk, test_signature, test_signature); } @@ -1086,7 +1506,7 @@ module aptos_framework::account { create_account(bob_addr); let challenge = SignerCapabilityOfferProofChallengeV2 { - sequence_number: borrow_global(alice_addr).sequence_number, + sequence_number: Account[alice_addr].sequence_number, source_address: alice_addr, recipient_address: bob_addr, }; @@ -1095,7 +1515,7 @@ module aptos_framework::account { // Maul the signature and make sure the call would fail let invalid_signature = ed25519::signature_to_bytes(&sig); - let first_sig_byte = vector::borrow_mut(&mut invalid_signature, 0); + let first_sig_byte = &mut invalid_signature[0]; *first_sig_byte = *first_sig_byte ^ 1; offer_signer_capability(&alice, invalid_signature, 0, alice_pk_bytes, bob_addr); @@ -1112,7 +1532,7 @@ module aptos_framework::account { create_account(bob_addr); let challenge = SignerCapabilityOfferProofChallengeV2 { - sequence_number: borrow_global(alice_addr).sequence_number, + sequence_number: Account[alice_addr].sequence_number, source_address: alice_addr, recipient_address: bob_addr, }; @@ -1127,12 +1547,129 @@ module aptos_framework::account { bob_addr ); - assert!(option::contains(&borrow_global(alice_addr).signer_capability_offer.for, &bob_addr), 0); + assert!(Account[alice_addr].signer_capability_offer.for.contains(&bob_addr), 0); let signer = create_authorized_signer(&bob, alice_addr); assert!(signer::address_of(&signer) == signer::address_of(&alice), 0); } + #[test(bob = @0x345)] + public entry fun test_valid_check_signer_capability_and_create_authorized_signer_with_permission(bob: signer) acquires Account { + let (alice_sk, alice_pk) = ed25519::generate_keys(); + let alice_pk_bytes = ed25519::validated_public_key_to_bytes(&alice_pk); + let alice = create_account_from_ed25519_public_key(alice_pk_bytes); + let alice_addr = signer::address_of(&alice); + + let bob_addr = signer::address_of(&bob); + create_account(bob_addr); + + let challenge = SignerCapabilityOfferProofChallengeV2 { + sequence_number: Account[alice_addr].sequence_number, + source_address: alice_addr, + recipient_address: bob_addr, + }; + + let alice_signer_capability_offer_sig = ed25519::sign_struct(&alice_sk, challenge); + + let alice_permission_handle = permissioned_signer::create_permissioned_handle(&alice); + let alice_permission_signer = permissioned_signer::signer_from_permissioned_handle(&alice_permission_handle); + + grant_key_offering_permission(&alice, &alice_permission_signer); + + offer_signer_capability( + &alice_permission_signer, + ed25519::signature_to_bytes(&alice_signer_capability_offer_sig), + 0, + alice_pk_bytes, + bob_addr + ); + + assert!(Account[alice_addr].signer_capability_offer.for.contains(&bob_addr), 0); + + let signer = create_authorized_signer(&bob, alice_addr); + assert!(signer::address_of(&signer) == signer::address_of(&alice), 0); 
+ + permissioned_signer::destroy_permissioned_handle(alice_permission_handle); + } + + #[test(bob = @0x345)] + #[expected_failure(abort_code = 0x50017, location = Self)] + public entry fun test_valid_check_signer_capability_and_create_authorized_signer_with_no_permission(bob: signer) acquires Account { + let (alice_sk, alice_pk) = ed25519::generate_keys(); + let alice_pk_bytes = ed25519::validated_public_key_to_bytes(&alice_pk); + let alice = create_account_from_ed25519_public_key(alice_pk_bytes); + let alice_addr = signer::address_of(&alice); + + let bob_addr = signer::address_of(&bob); + create_account(bob_addr); + + let challenge = SignerCapabilityOfferProofChallengeV2 { + sequence_number: Account[alice_addr].sequence_number, + source_address: alice_addr, + recipient_address: bob_addr, + }; + + let alice_signer_capability_offer_sig = ed25519::sign_struct(&alice_sk, challenge); + + let alice_permission_handle = permissioned_signer::create_permissioned_handle(&alice); + let alice_permission_signer = permissioned_signer::signer_from_permissioned_handle(&alice_permission_handle); + + offer_signer_capability( + &alice_permission_signer, + ed25519::signature_to_bytes(&alice_signer_capability_offer_sig), + 0, + alice_pk_bytes, + bob_addr + ); + + assert!(Account[alice_addr].signer_capability_offer.for.contains(&bob_addr), 0); + + let signer = create_authorized_signer(&bob, alice_addr); + assert!(signer::address_of(&signer) == signer::address_of(&alice), 0); + + permissioned_signer::destroy_permissioned_handle(alice_permission_handle); + } + + #[test(bob = @0x345)] + #[expected_failure(abort_code = 0x50017, location = Self)] + public entry fun test_valid_check_signer_capability_and_create_authorized_signer_with_wrong_permission(bob: signer) acquires Account { + let (alice_sk, alice_pk) = ed25519::generate_keys(); + let alice_pk_bytes = ed25519::validated_public_key_to_bytes(&alice_pk); + let alice = create_account_from_ed25519_public_key(alice_pk_bytes); + let alice_addr = signer::address_of(&alice); + + let bob_addr = signer::address_of(&bob); + create_account(bob_addr); + + let challenge = SignerCapabilityOfferProofChallengeV2 { + sequence_number: Account[alice_addr].sequence_number, + source_address: alice_addr, + recipient_address: bob_addr, + }; + + let alice_signer_capability_offer_sig = ed25519::sign_struct(&alice_sk, challenge); + + let alice_permission_handle = permissioned_signer::create_permissioned_handle(&alice); + let alice_permission_signer = permissioned_signer::signer_from_permissioned_handle(&alice_permission_handle); + + grant_key_rotation_permission(&alice, &alice_permission_signer); + + offer_signer_capability( + &alice_permission_signer, + ed25519::signature_to_bytes(&alice_signer_capability_offer_sig), + 0, + alice_pk_bytes, + bob_addr + ); + + assert!(Account[alice_addr].signer_capability_offer.for.contains(&bob_addr), 0); + + let signer = create_authorized_signer(&bob, alice_addr); + assert!(signer::address_of(&signer) == signer::address_of(&alice), 0); + + permissioned_signer::destroy_permissioned_handle(alice_permission_handle); + } + #[test(bob = @0x345)] public entry fun test_get_signer_cap_and_is_signer_cap(bob: signer) acquires Account { let (alice_sk, alice_pk) = ed25519::generate_keys(); @@ -1144,7 +1681,7 @@ module aptos_framework::account { create_account(bob_addr); let challenge = SignerCapabilityOfferProofChallengeV2 { - sequence_number: borrow_global(alice_addr).sequence_number, + sequence_number: Account[alice_addr].sequence_number, source_address: 
alice_addr, recipient_address: bob_addr, }; @@ -1179,7 +1716,7 @@ module aptos_framework::account { create_account(bob_addr); let challenge = SignerCapabilityOfferProofChallengeV2 { - sequence_number: borrow_global(alice_addr).sequence_number, + sequence_number: Account[alice_addr].sequence_number, source_address: alice_addr, recipient_address: bob_addr, }; @@ -1194,8 +1731,8 @@ module aptos_framework::account { bob_addr ); - let alice_account_resource = borrow_global_mut(alice_addr); - assert!(option::contains(&alice_account_resource.signer_capability_offer.for, &bob_addr), 0); + let alice_account_resource = &mut Account[alice_addr]; + assert!(alice_account_resource.signer_capability_offer.for.contains(&bob_addr), 0); create_authorized_signer(&charlie, alice_addr); } @@ -1211,7 +1748,7 @@ module aptos_framework::account { create_account(bob_addr); let challenge = SignerCapabilityOfferProofChallengeV2 { - sequence_number: borrow_global(alice_addr).sequence_number, + sequence_number: Account[alice_addr].sequence_number, source_address: alice_addr, recipient_address: bob_addr, }; @@ -1235,7 +1772,7 @@ module aptos_framework::account { let alice_pk_bytes = ed25519::validated_public_key_to_bytes(&alice_pk); let alice = create_account_from_ed25519_public_key(alice_pk_bytes); let alice_addr = signer::address_of(&alice); - let alice_account_resource = borrow_global(alice_addr); + let alice_account_resource = &Account[alice_addr]; let bob_addr = signer::address_of(&bob); create_account(bob_addr); @@ -1290,8 +1827,8 @@ module aptos_framework::account { bob_addr ); - let alice_resource = borrow_global_mut(signer::address_of(&alice)); - assert!(option::contains(&alice_resource.rotation_capability_offer.for, &bob_addr), 0); + let alice_resource = &mut Account[signer::address_of(&alice)]; + assert!(alice_resource.rotation_capability_offer.for.contains(&bob_addr)); } #[test(bob = @0x345, framework = @aptos_framework)] @@ -1412,7 +1949,7 @@ module aptos_framework::account { let new_address = from_bcs::to_address(new_auth_key); let challenge = RotationProofChallenge { - sequence_number: borrow_global(alice_addr).sequence_number, + sequence_number: Account[alice_addr].sequence_number, originator: alice_addr, current_auth_key: alice_addr, new_public_key: multi_ed25519::unvalidated_public_key_to_bytes(&new_pk_unvalidated), @@ -1430,10 +1967,10 @@ module aptos_framework::account { multi_ed25519::signature_to_bytes(&from_sig), multi_ed25519::signature_to_bytes(&to_sig), ); - let address_map = &mut borrow_global_mut(@aptos_framework).address_map; - let expected_originating_address = table::borrow(address_map, new_address); + let address_map = &OriginatingAddress[@aptos_framework].address_map; + let expected_originating_address = address_map.borrow(new_address); assert!(*expected_originating_address == alice_addr, 0); - assert!(borrow_global(alice_addr).authentication_key == new_auth_key, 0); + assert!(Account[alice_addr].authentication_key == new_auth_key, 0); } #[test(account = @aptos_framework)] @@ -1448,7 +1985,7 @@ module aptos_framework::account { let alice_addr = from_bcs::to_address(curr_auth_key); let alice = create_account_unchecked(alice_addr); - let account_resource = borrow_global_mut(alice_addr); + let account_resource = &mut Account[alice_addr]; let (new_sk, new_pk) = ed25519::generate_keys(); let new_pk_unvalidated = ed25519::public_key_to_unvalidated(&new_pk); @@ -1475,12 +2012,46 @@ module aptos_framework::account { ed25519::signature_to_bytes(&to_sig), ); - let address_map = &mut 
borrow_global_mut(@aptos_framework).address_map; - let expected_originating_address = table::borrow(address_map, new_addr); + let address_map = &OriginatingAddress[@aptos_framework].address_map; + let expected_originating_address = address_map.borrow(new_addr); assert!(*expected_originating_address == alice_addr, 0); - assert!(borrow_global(alice_addr).authentication_key == new_auth_key, 0); + assert!(Account[alice_addr].authentication_key == new_auth_key, 0); } + #[test(account = @aptos_framework)] + public entry fun test_add_ed25519_backup_key_to_keyless_account( + account: signer + ) acquires Account { + initialize(&account); + let keyless_pk_bytes: vector = x"031b68747470733a2f2f6163636f756e74732e676f6f676c652e636f6d2086bc0a0a825eb6337ca1e8a3157e490eac8df23d5cef25d9641ad5e7edc1d514"; + let curr_pk = single_key::new_public_key_from_bytes(keyless_pk_bytes); + let alice_addr = from_bcs::to_address(curr_pk.to_authentication_key()); + let alice = create_account_unchecked(alice_addr); + + let (new_sk, new_pk) = ed25519::generate_keys(); + + let challenge = RotationProofChallenge { + sequence_number: Account[alice_addr].sequence_number, + originator: alice_addr, + current_auth_key: alice_addr, + new_public_key: ed25519::validated_public_key_to_bytes(&new_pk), + }; + + let new_pk_unvalidated = ed25519::public_key_to_unvalidated(&new_pk); + let backup_key_as_single_key = single_key::from_ed25519_public_key_unvalidated(new_pk_unvalidated); + let new_public_key = multi_key::new_multi_key_from_single_keys(vector[curr_pk, backup_key_as_single_key], 1); + let new_auth_key = new_public_key.to_authentication_key(); + + let to_sig = ed25519::sign_struct(&new_sk, challenge); + + upsert_ed25519_backup_key_on_keyless_account( + &alice, + keyless_pk_bytes, + ed25519::validated_public_key_to_bytes(&new_pk), + ed25519::signature_to_bytes(&to_sig), + ); + assert!(Account[alice_addr].authentication_key == new_auth_key, 0); + } #[test(account = @aptos_framework)] public entry fun test_simple_rotation(account: &signer) acquires Account { @@ -1495,7 +2066,7 @@ module aptos_framework::account { let _new_addr = from_bcs::to_address(new_auth_key); rotate_authentication_key_call(&alice, new_auth_key); - assert!(borrow_global(alice_addr).authentication_key == new_auth_key, 0); + assert!(Account[alice_addr].authentication_key == new_auth_key, 0); } @@ -1504,7 +2075,7 @@ module aptos_framework::account { public entry fun test_max_guid(account: &signer) acquires Account { let addr = signer::address_of(account); create_account_unchecked(addr); - let account_state = borrow_global_mut(addr); + let account_state = &mut Account[addr]; account_state.guid_creation_num = MAX_GUID_CREATION_NUM - 1; create_guid(account); } @@ -1521,15 +2092,14 @@ module aptos_framework::account { create_account_unchecked(addr); register_coin(addr); - let eventhandle = &borrow_global(addr).coin_register_events; - let event = CoinRegisterEvent { type_info: type_info::type_of() }; + let event = CoinRegister { account: addr, type_info: type_info::type_of() }; - let events = event::emitted_events_by_handle(eventhandle); - assert!(vector::length(&events) == 1, 0); - assert!(vector::borrow(&events, 0) == &event, 1); - assert!(event::was_event_emitted_by_handle(eventhandle, &event), 2); + let events = event::emitted_events(); + assert!(events.length() == 1, 0); + assert!(events.borrow(0) == &event, 1); + assert!(event::was_event_emitted(&event), 2); - let event = CoinRegisterEvent { type_info: type_info::type_of() }; - 
assert!(!event::was_event_emitted_by_handle(eventhandle, &event), 3); + let event = CoinRegister { account: addr, type_info: type_info::type_of() }; + assert!(!event::was_event_emitted(&event), 3); } } diff --git a/aptos-move/framework/aptos-framework/sources/account.spec.move b/aptos-move/framework/aptos-framework/sources/account/account.spec.move similarity index 95% rename from aptos-move/framework/aptos-framework/sources/account.spec.move rename to aptos-move/framework/aptos-framework/sources/account/account.spec.move index 2b80dbdce527d..24efc4823fd73 100644 --- a/aptos-move/framework/aptos-framework/sources/account.spec.move +++ b/aptos-move/framework/aptos-framework/sources/account/account.spec.move @@ -105,8 +105,7 @@ spec aptos_framework::account { /// spec module { - pragma verify = true; - pragma aborts_if_is_strict; + pragma verify = false; } /// Only the address `@aptos_framework` can call. @@ -121,7 +120,7 @@ spec aptos_framework::account { /// Ensure that the account exists at the end of the call. spec create_account_if_does_not_exist(account_address: address) { let authentication_key = bcs::to_bytes(account_address); - + modifies global(account_address); aborts_if !exists(account_address) && ( account_address == @vm_reserved || account_address == @aptos_framework @@ -146,14 +145,24 @@ spec aptos_framework::account { /// Check if the bytes of the new address is 32. /// The Account does not exist under the new address before creating the account. spec create_account_unchecked(new_address: address): signer { + pragma opaque; include CreateAccountAbortsIf {addr: new_address}; + modifies global(new_address); ensures signer::address_of(result) == new_address; ensures exists(new_address); } spec exists_at { + pragma opaque; /// [high-level-req-3] aborts_if false; + ensures result == spec_exists_at(addr); + } + + spec fun spec_exists_at(addr: address): bool { + use std::features; + use std::features::DEFAULT_ACCOUNT_RESOURCE; + features::spec_is_enabled(DEFAULT_ACCOUNT_RESOURCE) || exists(addr) } spec schema CreateAccountAbortsIf { @@ -187,8 +196,13 @@ spec aptos_framework::account { } spec get_authentication_key(addr: address): vector { + pragma opaque; aborts_if !exists(addr); - ensures result == global(addr).authentication_key; + ensures result == spec_get_authentication_key(addr); + } + + spec fun spec_get_authentication_key(addr: address): vector { + global(addr).authentication_key } /// The Account existed under the signer before the call. @@ -213,6 +227,10 @@ spec aptos_framework::account { ensures account_resource.authentication_key == new_auth_key; } + spec rotate_authentication_key_from_public_key(account: &signer, scheme: u8, new_public_key_bytes: vector) { + aborts_if scheme != ED25519_SCHEME && scheme != MULTI_ED25519_SCHEME && scheme != SINGLE_KEY_SCHEME && scheme != MULTI_KEY_SCHEME; + } + spec fun spec_assert_valid_rotation_proof_signature_and_get_auth_key(scheme: u8, public_key_bytes: vector, signature: vector, challenge: RotationProofChallenge): vector; spec assert_valid_rotation_proof_signature_and_get_auth_key(scheme: u8, public_key_bytes: vector, signature: vector, challenge: &RotationProofChallenge): vector { @@ -390,7 +408,6 @@ spec aptos_framework::account { source_address, recipient_address, }; - aborts_if !exists(@aptos_framework); aborts_if !exists(recipient_address); aborts_if !exists(source_address); @@ -575,6 +592,7 @@ spec aptos_framework::account { // This function should not abort assuming the result of `sha3_256` is deserializable into an address. 
aborts_if [abstract] false; ensures [abstract] result == spec_create_resource_address(source, seed); + ensures [abstract] source != result; // We can assume that the derived resource account does not equal to `source` } spec fun spec_create_resource_address(source: address, seed: vector): address; @@ -584,8 +602,8 @@ spec aptos_framework::account { let resource_addr = spec_create_resource_address(source_addr, seed); aborts_if len(ZERO_AUTH_KEY) != 32; - include exists_at(resource_addr) ==> CreateResourceAccountAbortsIf; - include !exists_at(resource_addr) ==> CreateAccountAbortsIf {addr: resource_addr}; + include spec_exists_at(resource_addr) ==> CreateResourceAccountAbortsIf; + include !spec_exists_at(resource_addr) ==> CreateAccountAbortsIf {addr: resource_addr}; ensures signer::address_of(result_1) == resource_addr; let post offer_for = global(resource_addr).signer_capability_offer.for; @@ -617,7 +635,7 @@ spec aptos_framework::account { } /// The Account existed under the signer. - /// The guid_creation_num of the ccount resource is up to MAX_U64. + /// The guid_creation_num of the account resource is up to MAX_U64. spec create_guid(account_signer: &signer): guid::GUID { let addr = signer::address_of(account_signer); include NewEventHandleAbortsIf { @@ -656,8 +674,12 @@ spec aptos_framework::account { spec schema CreateResourceAccountAbortsIf { resource_addr: address; let account = global(resource_addr); - aborts_if len(account.signer_capability_offer.for.vec) != 0; - aborts_if account.sequence_number != 0; + // aborts_if len(account.signer_capability_offer.for.vec) != 0; + // aborts_if account.sequence_number != 0; + } + + spec originating_address(auth_key: address): Option
{ + pragma verify=false; + } spec update_auth_key_and_originating_address_table( @@ -680,6 +702,7 @@ aborts_if table::spec_contains(address_map, curr_auth_key) && table::spec_get(address_map, curr_auth_key) != originating_addr; aborts_if !from_bcs::deserializable<address>
(new_auth_key_vector); + aborts_if curr_auth_key == new_auth_key; aborts_if curr_auth_key != new_auth_key && table::spec_contains(address_map, new_auth_key); ensures table::spec_contains(global<OriginatingAddress>(@aptos_framework).address_map, from_bcs::deserialize<address>
(new_auth_key_vector)); @@ -728,4 +751,8 @@ spec aptos_framework::account { aborts_if account_scheme != ED25519_SCHEME && account_scheme != MULTI_ED25519_SCHEME; } + + spec set_originating_address(account: &signer) { + pragma verify=false; + } } diff --git a/aptos-move/framework/aptos-framework/sources/account/account_abstraction.move b/aptos-move/framework/aptos-framework/sources/account/account_abstraction.move new file mode 100644 index 0000000000000..47c6b564f835a --- /dev/null +++ b/aptos-move/framework/aptos-framework/sources/account/account_abstraction.move @@ -0,0 +1,377 @@ +module aptos_framework::account_abstraction { + use std::bcs; + use std::hash; + use aptos_std::from_bcs; + + use std::error; + use std::option::{Self, Option}; + use std::signer; + use std::string::{Self, String}; + use aptos_std::ordered_map::{Self, OrderedMap}; + use aptos_std::big_ordered_map::{Self, BigOrderedMap}; + use aptos_framework::create_signer; + use aptos_framework::event; + use aptos_framework::features; + use aptos_framework::function_info::{Self, FunctionInfo}; + use aptos_framework::object; + use aptos_framework::auth_data::AbstractionAuthData; + use aptos_framework::system_addresses; + use aptos_framework::permissioned_signer::is_permissioned_signer; + #[test_only] + use aptos_framework::account::create_account_for_test; + #[test_only] + use aptos_framework::auth_data; + + friend aptos_framework::transaction_validation; + #[test_only] + friend aptos_framework::account_abstraction_tests; + + const EDISPATCHABLE_AUTHENTICATOR_IS_NOT_USED: u64 = 1; + const EFUNCTION_INFO_EXISTENCE: u64 = 2; + const EAUTH_FUNCTION_SIGNATURE_MISMATCH: u64 = 3; + const ENOT_MASTER_SIGNER: u64 = 4; + const EINCONSISTENT_SIGNER_ADDRESS: u64 = 5; + const EDEPRECATED_FUNCTION: u64 = 6; + const EDERIVABLE_AA_NOT_INITIALIZED: u64 = 7; + + const EACCOUNT_ABSTRACTION_NOT_ENABLED: u64 = 8; + const EDERIVABLE_ACCOUNT_ABSTRACTION_NOT_ENABLED: u64 = 9; + + /// derivable_aa_account_address uses this for domain separation within its native implementation + /// source is defined in Scheme enum in types/src/transaction/authenticator.rs + const DERIVABLE_ABSTRACTION_DERIVED_SCHEME: u8 = 5; + + const MAX_U64: u128 = 18446744073709551615; + + #[event] + struct UpdateDispatchableAuthenticator has store, drop { + account: address, + update: vector, + auth_function: FunctionInfo, + } + + #[event] + struct RemoveDispatchableAuthenticator has store, drop { + account: address, + } + + #[resource_group_member(group = aptos_framework::object::ObjectGroup)] + /// The dispatchable authenticator that defines how to authenticates this account in the specified module. + /// An integral part of Account Abstraction. + enum DispatchableAuthenticator has key, copy, drop { + V1 { auth_functions: OrderedMap } + } + + enum DerivableRegisterValue has store { + Empty, + } + + /// The dispatchable derivable-scoped authenticator, that defines how to authenticate + enum DerivableDispatchableAuthenticator has key { + V1 { auth_functions: BigOrderedMap } + } + + #[view] + /// Return `true` if the account is an abstracted account that can be authenticated with dispatchable move authenticator. + public fun using_dispatchable_authenticator(addr: address): bool { + exists(resource_addr(addr)) + } + + #[view] + /// Return the current dispatchable authenticator move function info. `None` means this authentication scheme is disabled. 
+ public fun dispatchable_authenticator(addr: address): Option> acquires DispatchableAuthenticator { + let resource_addr = resource_addr(addr); + if (exists(resource_addr)) { + option::some( + DispatchableAuthenticator[resource_addr].auth_functions.keys() + ) + } else { option::none() } + } + + #[view] + /// Return the account address corresponding to the given `abstract_public_key`, + /// for the derivable account abstraction defined by the given function. + public fun derive_account_address_view( + module_address: address, + module_name: String, + function_name: String, + abstract_public_key: vector + ): address { + derive_account_address( + function_info::new_function_info_from_address(module_address, module_name, function_name), + &abstract_public_key, + ) + } + + /// Return the account address corresponding to the given `abstract_public_key`, + /// for the derivable account abstraction defined by the given function. + /// TODO: probably worth creating some module with all these derived functions, + /// and do computation/caching in rust to avoid recomputation, as we do for objects. + public fun derive_account_address(derivable_func_info: FunctionInfo, abstract_public_key: &vector): address { + // using bcs serialized structs here - this allows for no need for separators. + // Alternative would've been to create unique string, we would need to convert derivable_func_info into string, + // then authentication_key to hex, and then we need separators as well - like :: + let bytes = bcs::to_bytes(&derivable_func_info); + bytes.append(bcs::to_bytes(abstract_public_key)); + bytes.push_back(DERIVABLE_ABSTRACTION_DERIVED_SCHEME); + from_bcs::to_address(hash::sha3_256(bytes)) + } + + /// Add dispatchable authentication function that enables account abstraction via this function. + /// Note: it is a private entry function that can only be called directly from transaction. + entry fun add_authentication_function( + account: &signer, + module_address: address, + module_name: String, + function_name: String, + ) acquires DispatchableAuthenticator { + assert!(features::is_account_abstraction_enabled(), error::invalid_state(EACCOUNT_ABSTRACTION_NOT_ENABLED)); + assert!(!is_permissioned_signer(account), error::permission_denied(ENOT_MASTER_SIGNER)); + update_dispatchable_authenticator_impl( + account, + function_info::new_function_info_from_address(module_address, module_name, function_name), + true + ); + } + + /// Remove dispatchable authentication function that enables account abstraction via this function. + /// dispatchable function needs to verify that signing_data.authenticator() is a valid signature of signing_data.digest(). + /// Note: it is a private entry function that can only be called directly from transaction. + entry fun remove_authentication_function( + account: &signer, + module_address: address, + module_name: String, + function_name: String, + ) acquires DispatchableAuthenticator { + assert!(!is_permissioned_signer(account), error::permission_denied(ENOT_MASTER_SIGNER)); + update_dispatchable_authenticator_impl( + account, + function_info::new_function_info_from_address(module_address, module_name, function_name), + false + ); + } + + /// Remove dispatchable authenticator so that all dispatchable authentication functions will be removed as well. + /// After calling this function, the account is not abstracted at all. + /// Note: it is a private entry function that can only be called directly from transaction. 
+ entry fun remove_authenticator( + account: &signer, + ) acquires DispatchableAuthenticator { + assert!(!is_permissioned_signer(account), error::permission_denied(ENOT_MASTER_SIGNER)); + let addr = signer::address_of(account); + let resource_addr = resource_addr(addr); + if (exists(resource_addr)) { + move_from(resource_addr); + event::emit(RemoveDispatchableAuthenticator { + account: addr, + }); + }; + } + + /// Add dispatchable derivable authentication function, that enables account abstraction via this function. + /// This means all accounts within the domain can use it to authenticate, without needing an initialization (unlike non-domain AA). + /// dispatchable function needs to verify two things: + /// - that signing_data.derivable_abstract_signature() is a valid signature of signing_data.digest() (just like regular AA) + /// - that signing_data.derivable_abstract_public_key() is correct identity representing the authenticator + /// (missing this step would allow impersonation) + /// + /// Note: This is public entry function, as it requires framework signer, and that can + /// only be obtained as a part of the governance script. + public entry fun register_derivable_authentication_function( + aptos_framework: &signer, + module_address: address, + module_name: String, + function_name: String, + ) acquires DerivableDispatchableAuthenticator { + assert!(features::is_derivable_account_abstraction_enabled(), error::invalid_state(EDERIVABLE_ACCOUNT_ABSTRACTION_NOT_ENABLED)); + system_addresses::assert_aptos_framework(aptos_framework); + + DerivableDispatchableAuthenticator[@aptos_framework].auth_functions.add( + function_info::new_function_info_from_address(module_address, module_name, function_name), + DerivableRegisterValue::Empty, + ); + } + + public entry fun initialize(aptos_framework: &signer) { + system_addresses::assert_aptos_framework(aptos_framework); + move_to( + aptos_framework, + DerivableDispatchableAuthenticator::V1 { auth_functions: big_ordered_map::new_with_config(0, 0, false) } + ); + } + + inline fun resource_addr(source: address): address { + object::create_user_derived_object_address(source, @aptos_fungible_asset) + } + + fun update_dispatchable_authenticator_impl( + account: &signer, + auth_function: FunctionInfo, + is_add: bool, + ) acquires DispatchableAuthenticator { + let addr = signer::address_of(account); + let resource_addr = resource_addr(addr); + let dispatcher_auth_function_info = function_info::new_function_info_from_address( + @aptos_framework, + string::utf8(b"account_abstraction"), + string::utf8(b"dispatchable_authenticate"), + ); + assert!( + function_info::check_dispatch_type_compatibility(&dispatcher_auth_function_info, &auth_function), + error::invalid_argument(EAUTH_FUNCTION_SIGNATURE_MISMATCH) + ); + if (is_add) { + if (!exists(resource_addr)) { + move_to( + &create_signer::create_signer(resource_addr), + DispatchableAuthenticator::V1 { auth_functions: ordered_map::new() } + ); + }; + let current_map = &mut borrow_global_mut(resource_addr).auth_functions; + assert!( + !current_map.contains(&auth_function), + error::already_exists(EFUNCTION_INFO_EXISTENCE) + ); + current_map.add(auth_function, true); + event::emit( + UpdateDispatchableAuthenticator { + account: addr, + update: b"add", + auth_function, + } + ); + } else { + assert!(exists(resource_addr), error::not_found(EFUNCTION_INFO_EXISTENCE)); + let current_map = &mut borrow_global_mut(resource_addr).auth_functions; + assert!( + current_map.contains(&auth_function), + 
error::not_found(EFUNCTION_INFO_EXISTENCE) + ); + current_map.remove(&auth_function); + event::emit( + UpdateDispatchableAuthenticator { + account: addr, + update: b"remove", + auth_function, + } + ); + if (current_map.length() == 0) { + remove_authenticator(account); + } + }; + } + + inline fun dispatchable_authenticator_internal(addr: address): &OrderedMap { + assert!(using_dispatchable_authenticator(addr), error::not_found(EDISPATCHABLE_AUTHENTICATOR_IS_NOT_USED)); + &DispatchableAuthenticator[resource_addr(addr)].auth_functions + } + + inline fun dispatchable_derivable_authenticator_internal(): &BigOrderedMap { + assert!(exists(@aptos_framework), error::not_found(EDERIVABLE_AA_NOT_INITIALIZED)); + &DerivableDispatchableAuthenticator[@aptos_framework].auth_functions + } + + fun authenticate( + account: signer, + func_info: FunctionInfo, + signing_data: AbstractionAuthData, + ): signer acquires DispatchableAuthenticator, DerivableDispatchableAuthenticator { + let master_signer_addr = signer::address_of(&account); + + if (signing_data.is_derivable()) { + assert!(features::is_derivable_account_abstraction_enabled(), error::invalid_state(EDERIVABLE_ACCOUNT_ABSTRACTION_NOT_ENABLED)); + assert!(master_signer_addr == derive_account_address(func_info, signing_data.derivable_abstract_public_key()), error::invalid_state(EINCONSISTENT_SIGNER_ADDRESS)); + + let func_infos = dispatchable_derivable_authenticator_internal(); + assert!(func_infos.contains(&func_info), error::not_found(EFUNCTION_INFO_EXISTENCE)); + } else { + assert!(features::is_account_abstraction_enabled(), error::invalid_state(EACCOUNT_ABSTRACTION_NOT_ENABLED)); + + let func_infos = dispatchable_authenticator_internal(master_signer_addr); + assert!(func_infos.contains(&func_info), error::not_found(EFUNCTION_INFO_EXISTENCE)); + }; + + function_info::load_module_from_function(&func_info); + let returned_signer = dispatchable_authenticate(account, signing_data, &func_info); + // Returned signer MUST represent the same account address. Otherwise, it may break the invariant of Aptos blockchain! + assert!( + master_signer_addr == signer::address_of(&returned_signer), + error::invalid_state(EINCONSISTENT_SIGNER_ADDRESS) + ); + returned_signer + } + + /// The native function to dispatch customized move authentication function. 
+ native fun dispatchable_authenticate( + account: signer, + signing_data: AbstractionAuthData, + function: &FunctionInfo + ): signer; + + #[test(bob = @0xb0b)] + entry fun test_dispatchable_authenticator( + bob: &signer, + ) acquires DispatchableAuthenticator { + let bob_addr = signer::address_of(bob); + create_account_for_test(bob_addr); + assert!(!using_dispatchable_authenticator(bob_addr)); + add_authentication_function( + bob, + @aptos_framework, + string::utf8(b"account_abstraction_tests"), + string::utf8(b"test_auth") + ); + assert!(using_dispatchable_authenticator(bob_addr)); + remove_authenticator(bob); + assert!(!using_dispatchable_authenticator(bob_addr)); + } + + #[test(bob = @0xb0b)] + #[expected_failure(abort_code = 0x30005, location = Self)] + entry fun test_authenticate_function_returning_invalid_signer( + bob: signer, + ) acquires DispatchableAuthenticator, DerivableDispatchableAuthenticator { + let bob_addr = signer::address_of(&bob); + create_account_for_test(bob_addr); + assert!(!using_dispatchable_authenticator(bob_addr), 0); + add_authentication_function( + &bob, + @aptos_framework, + string::utf8(b"account_abstraction_tests"), + string::utf8(b"invalid_authenticate") + ); + let function_info = function_info::new_function_info_from_address( + @aptos_framework, + string::utf8(b"account_abstraction_tests"), + string::utf8(b"invalid_authenticate") + ); + authenticate(bob, function_info, auth_data::create_auth_data(vector[], vector[])); + } + + #[deprecated] + public entry fun add_dispatchable_authentication_function( + _account: &signer, + _module_address: address, + _module_name: String, + _function_name: String, + ) { + abort std::error::unavailable(EDEPRECATED_FUNCTION) + } + + #[deprecated] + public entry fun remove_dispatchable_authentication_function( + _account: &signer, + _module_address: address, + _module_name: String, + _function_name: String, + ) { + abort std::error::unavailable(EDEPRECATED_FUNCTION) + } + + #[deprecated] + public entry fun remove_dispatchable_authenticator( + _account: &signer, + ) { + abort std::error::unavailable(EDEPRECATED_FUNCTION) + } +} diff --git a/aptos-move/framework/aptos-framework/sources/account/account_abstraction.spec.move b/aptos-move/framework/aptos-framework/sources/account/account_abstraction.spec.move new file mode 100644 index 0000000000000..00163529aa71e --- /dev/null +++ b/aptos-move/framework/aptos-framework/sources/account/account_abstraction.spec.move @@ -0,0 +1,17 @@ +spec aptos_framework::account_abstraction { + spec module { + pragma verify = false; + } + + + spec fun spec_dispatchable_authenticate( + account: signer, + signing_data: AbstractionAuthData, + function: &FunctionInfo + ): signer; + + spec dispatchable_authenticate(account: signer, signing_data: AbstractionAuthData, function: &FunctionInfo): signer { + pragma opaque; + ensures [abstract] result == spec_dispatchable_authenticate(account, signing_data, function); + } +} diff --git a/aptos-move/framework/aptos-framework/sources/account/auth_data.move b/aptos-move/framework/aptos-framework/sources/account/auth_data.move new file mode 100644 index 0000000000000..28e6b842504ae --- /dev/null +++ b/aptos-move/framework/aptos-framework/sources/account/auth_data.move @@ -0,0 +1,58 @@ +module aptos_framework::auth_data { + use std::error; + + const ENOT_REGULAR_AUTH_DATA: u64 = 1; + const ENOT_DERIVABLE_AUTH_DATA: u64 = 2; + + enum AbstractionAuthData has copy, drop { + V1 { + digest: vector, + authenticator: vector + }, + DerivableV1 { + digest: vector, + 
abstract_signature: vector, + abstract_public_key: vector, + }, + } + + #[test_only] + public fun create_auth_data(digest: vector, authenticator: vector): AbstractionAuthData { + AbstractionAuthData::V1 { digest, authenticator } + } + + public fun digest(self: &AbstractionAuthData): &vector { + &self.digest + } + + // separate authenticator and derivable_authenticator - to not allow accidental mixing + // in user authentication code + + #[test_only] + public fun create_derivable_auth_data( + digest: vector, + abstract_signature: vector, + abstract_public_key: vector + ): AbstractionAuthData { + AbstractionAuthData::DerivableV1 { digest, abstract_signature, abstract_public_key } + } + + public fun authenticator(self: &AbstractionAuthData): &vector { + assert!(self is V1, error::invalid_argument(ENOT_REGULAR_AUTH_DATA)); + &self.authenticator + } + + public fun is_derivable(self: &AbstractionAuthData): bool { + self is DerivableV1 + } + + public fun derivable_abstract_signature(self: &AbstractionAuthData): &vector { + assert!(self is DerivableV1, error::invalid_argument(ENOT_REGULAR_AUTH_DATA)); + &self.abstract_signature + } + + public fun derivable_abstract_public_key(self: &AbstractionAuthData): &vector { + assert!(self is DerivableV1, error::invalid_argument(ENOT_DERIVABLE_AUTH_DATA)); + &self.abstract_public_key + } +} diff --git a/aptos-move/framework/aptos-framework/sources/account/common_account_abstractions/base16.move b/aptos-move/framework/aptos-framework/sources/account/common_account_abstractions/base16.move new file mode 100644 index 0000000000000..9e068291877f1 --- /dev/null +++ b/aptos-move/framework/aptos-framework/sources/account/common_account_abstractions/base16.move @@ -0,0 +1,34 @@ +module aptos_framework::base16 { + + use std::vector; + + friend aptos_framework::ethereum_derivable_account; + + // Convert a hex character to a u8 + public(friend) fun hex_char_to_u8(c: u8): u8 { + if (c >= 48 && c <= 57) { // '0' to '9' + c - 48 + } else if (c >= 65 && c <= 70) { // 'A' to 'F' + c - 55 + } else if (c >= 97 && c <= 102) { // 'a' to 'f' + c - 87 + } else { + abort 1 + } + } + + // Convert a base16 encoded string to a vector of u8 + public(friend) fun base16_utf8_to_vec_u8(str: vector): vector { + let result = vector::empty(); + let i = 0; + while (i < vector::length(&str)) { + let c1 = vector::borrow(&str, i); + let c2 = vector::borrow(&str, i + 1); + let byte = hex_char_to_u8(*c1) << 4 | hex_char_to_u8(*c2); + vector::push_back(&mut result, byte); + i = i + 2; + }; + result + } + +} diff --git a/aptos-move/framework/aptos-framework/sources/account/common_account_abstractions/base16.spec.move b/aptos-move/framework/aptos-framework/sources/account/common_account_abstractions/base16.spec.move new file mode 100644 index 0000000000000..dc695f2771e84 --- /dev/null +++ b/aptos-move/framework/aptos-framework/sources/account/common_account_abstractions/base16.spec.move @@ -0,0 +1,9 @@ +spec aptos_framework::base16 { + + spec base16_utf8_to_vec_u8(str: vector): vector { + pragma opaque; + ensures [abstract] result == spec_base16_utf8_to_vec_u8(str); + } + + spec fun spec_base16_utf8_to_vec_u8(str: vector): vector; +} diff --git a/aptos-move/framework/aptos-framework/sources/account/common_account_abstractions/common_account_abstractions_utils.move b/aptos-move/framework/aptos-framework/sources/account/common_account_abstractions/common_account_abstractions_utils.move new file mode 100644 index 0000000000000..6546280d51314 --- /dev/null +++ 
b/aptos-move/framework/aptos-framework/sources/account/common_account_abstractions/common_account_abstractions_utils.move @@ -0,0 +1,82 @@ +module aptos_framework::common_account_abstractions_utils { + use std::chain_id; + use std::string_utils; + use std::transaction_context::{Self, EntryFunctionPayload}; + + friend aptos_framework::ethereum_derivable_account; + friend aptos_framework::solana_derivable_account; + + public(friend) fun network_name(): vector { + let chain_id = chain_id::get(); + if (chain_id == 1) { + b"mainnet" + } else if (chain_id == 2) { + b"testnet" + } else if (chain_id == 4) { + b"local" + } else { + let network_name = &mut vector[]; + network_name.append(b"custom network: "); + network_name.append(*string_utils::to_string(&chain_id).bytes()); + *network_name + } + } + + public(friend) fun entry_function_name(entry_function_payload: &EntryFunctionPayload): vector { + let entry_function_name = &mut vector[]; + let addr_str = string_utils::to_string( + &transaction_context::account_address(entry_function_payload) + ).bytes(); + // .slice(1) to remove the leading '@' char + entry_function_name.append(addr_str.slice(1, addr_str.length())); + entry_function_name.append(b"::"); + entry_function_name.append( + *transaction_context::module_name(entry_function_payload).bytes() + ); + entry_function_name.append(b"::"); + entry_function_name.append( + *transaction_context::function_name(entry_function_payload).bytes() + ); + *entry_function_name + } + + #[test_only] + use std::string::utf8; + + #[test(framework = @0x1)] + fun test_network_name_mainnet(framework: &signer) { + chain_id::initialize_for_test(framework, 1); + assert!(network_name() == b"mainnet"); + } + + #[test(framework = @0x1)] + fun test_network_name_testnet(framework: &signer) { + chain_id::initialize_for_test(framework, 2); + assert!(network_name() == b"testnet"); + } + + #[test(framework = @0x1)] + fun test_network_name_local(framework: &signer) { + chain_id::initialize_for_test(framework, 4); + assert!(network_name() == b"local"); + } + + #[test(framework = @0x1)] + fun test_network_name_other(framework: &signer) { + chain_id::initialize_for_test(framework, 99); + assert!(network_name() == b"custom network: 99"); + } + + #[test(framework = @0x1)] + fun test_entry_function_name() { + let entry_function_payload = transaction_context::new_entry_function_payload( + @0x1, + utf8(b"coin"), + utf8(b"transfer"), + vector[], + vector[] + ); + let entry_function_name = entry_function_name(&entry_function_payload); + assert!(entry_function_name == b"0x1::coin::transfer"); + } +} diff --git a/aptos-move/framework/aptos-framework/sources/account/common_account_abstractions/ethereum_derivable_account.move b/aptos-move/framework/aptos-framework/sources/account/common_account_abstractions/ethereum_derivable_account.move new file mode 100644 index 0000000000000..26a94d8309b07 --- /dev/null +++ b/aptos-move/framework/aptos-framework/sources/account/common_account_abstractions/ethereum_derivable_account.move @@ -0,0 +1,387 @@ +/// Derivable account abstraction that verifies a message signed by +/// SIWE. +/// 1. The message format is as follows: +/// +/// wants you to sign in with your Ethereum account: +/// +/// +/// Please confirm you explicitly initiated this request from . You are approving to execute transaction on Aptos blockchain (). +/// +/// URI: :// +/// Version: 1 +/// Chain ID: +/// Nonce: +/// Issued At: +/// +/// 2. The abstract public key is a BCS serialized `SIWEAbstractPublicKey`. +/// 3. 
The abstract signature is a BCS serialized `SIWEAbstractSignature`. +/// 4. This module has been tested for the following wallets: +/// - Metamask +/// - Phantom +/// - Coinbase +/// - OKX +/// - Exodus +/// - Backpack + +module aptos_framework::ethereum_derivable_account { + use aptos_framework::auth_data::AbstractionAuthData; + use aptos_framework::common_account_abstractions_utils::{network_name, entry_function_name}; + use aptos_framework::base16::base16_utf8_to_vec_u8; + use aptos_std::secp256k1; + use aptos_std::option; + use aptos_std::aptos_hash; + use std::bcs_stream::{Self, deserialize_u8}; + use std::chain_id; + use std::string_utils; + use std::transaction_context; + use std::vector; + use std::string::{Self, String}; + + /// Signature failed to verify. + const EINVALID_SIGNATURE: u64 = 1; + /// Entry function payload is missing. + const EMISSING_ENTRY_FUNCTION_PAYLOAD: u64 = 2; + /// Invalid signature type. + const EINVALID_SIGNATURE_TYPE: u64 = 3; + /// Address mismatch. + const EADDR_MISMATCH: u64 = 4; + /// Unexpected v value. + const EUNEXPECTED_V: u64 = 5; + + enum SIWEAbstractSignature has drop { + /// Deprecated, use MessageV2 instead + MessageV1 { + /// The date and time when the signature was issued + issued_at: String, + /// The signature of the message + signature: vector, + }, + MessageV2 { + /// The scheme in the URI of the message, e.g. the scheme of the website that requested the signature (http, https, etc.) + scheme: String, + /// The date and time when the signature was issued + issued_at: String, + /// The signature of the message + signature: vector, + }, + } + + struct SIWEAbstractPublicKey has drop { + // The Ethereum address, with 0x prefix, in utf8 bytes + ethereum_address: vector, + // The domain, in utf8 bytes + domain: vector, + } + + /// Deserializes the abstract public key which is supposed to be a bcs + /// serialized `SIWEAbstractPublicKey`. + fun deserialize_abstract_public_key(abstract_public_key: &vector): SIWEAbstractPublicKey { + let stream = bcs_stream::new(*abstract_public_key); + let ethereum_address = bcs_stream::deserialize_vector(&mut stream, |x| deserialize_u8(x)); + let domain = bcs_stream::deserialize_vector(&mut stream, |x| deserialize_u8(x)); + SIWEAbstractPublicKey { ethereum_address, domain } + } + + /// Returns a tuple of the signature type and the signature. + /// We include the issued_at in the signature as it is a required field in the SIWE standard. + fun deserialize_abstract_signature(abstract_signature: &vector): SIWEAbstractSignature { + let stream = bcs_stream::new(*abstract_signature); + let signature_type = bcs_stream::deserialize_u8(&mut stream); + if (signature_type == 0x00) { + let issued_at = bcs_stream::deserialize_vector(&mut stream, |x| deserialize_u8(x)); + let signature = bcs_stream::deserialize_vector(&mut stream, |x| deserialize_u8(x)); + SIWEAbstractSignature::MessageV1 { issued_at: string::utf8(issued_at), signature } + } else if (signature_type == 0x01) { + let scheme = bcs_stream::deserialize_vector(&mut stream, |x| deserialize_u8(x)); + let issued_at = bcs_stream::deserialize_vector(&mut stream, |x| deserialize_u8(x)); + let signature = bcs_stream::deserialize_vector(&mut stream, |x| deserialize_u8(x)); + SIWEAbstractSignature::MessageV2 { scheme: string::utf8(scheme), issued_at: string::utf8(issued_at), signature } + } else { + abort(EINVALID_SIGNATURE_TYPE) + } + } + + // construct a message that is used to verify the signature following the SIWE standard + // and ethers.js. 
ethers adds a prefix to the message, so we need to include it also + fun construct_message( + ethereum_address: &vector, + domain: &vector, + entry_function_name: &vector, + digest_utf8: &vector, + issued_at: &vector, + scheme: &vector, + ): vector { + let message = &mut vector[]; + message.append(*domain); + message.append(b" wants you to sign in with your Ethereum account:\n"); + message.append(*ethereum_address); + message.append(b"\n\nPlease confirm you explicitly initiated this request from "); + message.append(*domain); + message.append(b"."); + message.append(b" You are approving to execute transaction "); + message.append(*entry_function_name); + message.append(b" on Aptos blockchain"); + let network_name = network_name(); + message.append(b" ("); + message.append(network_name); + message.append(b")"); + message.append(b"."); + message.append(b"\n\nURI: "); + message.append(*scheme); + message.append(b"://"); + message.append(*domain); + message.append(b"\nVersion: 1"); + message.append(b"\nChain ID: "); + message.append(*string_utils::to_string(&chain_id::get()).bytes()); + message.append(b"\nNonce: "); + message.append(*digest_utf8); + message.append(b"\nIssued At: "); + message.append(*issued_at); + + let msg_len = vector::length(message); + + let prefix = b"\x19Ethereum Signed Message:\n"; + let msg_len_string = string_utils::to_string(&msg_len); // returns string + let msg_len_bytes = msg_len_string.bytes(); // vector + + let full_message = &mut vector[]; + full_message.append(prefix); + full_message.append(*msg_len_bytes); + full_message.append(*message); + + *full_message + } + + fun recover_public_key(signature_bytes: &vector, message: &vector): vector { + let rs = vector::slice(signature_bytes, 0, 64); + let v = *vector::borrow(signature_bytes, 64); + assert!(v == 27 || v == 28, EUNEXPECTED_V); + let signature = secp256k1::ecdsa_signature_from_bytes(rs); + + let maybe_recovered = secp256k1::ecdsa_recover(*message, v - 27, &signature); + + assert!( + option::is_some(&maybe_recovered), + EINVALID_SIGNATURE + ); + + let pubkey = option::borrow(&maybe_recovered); + + let pubkey_bytes = secp256k1::ecdsa_raw_public_key_to_bytes(pubkey); + + // Add 0x04 prefix to the public key, to match the + // full uncompressed format from ethers.js + let full_pubkey = &mut vector[]; + vector::push_back(full_pubkey, 4u8); + vector::append(full_pubkey, pubkey_bytes); + + *full_pubkey + } + + + fun authenticate_auth_data( + aa_auth_data: AbstractionAuthData, + entry_function_name: &vector + ) { + let derivable_abstract_public_key = aa_auth_data.derivable_abstract_public_key(); + let abstract_public_key = deserialize_abstract_public_key(derivable_abstract_public_key); + let digest_utf8 = string_utils::to_string(aa_auth_data.digest()).bytes(); + let abstract_signature = deserialize_abstract_signature(aa_auth_data.derivable_abstract_signature()); + let issued_at = abstract_signature.issued_at.bytes(); + let scheme = abstract_signature.scheme.bytes(); + let message = construct_message(&abstract_public_key.ethereum_address, &abstract_public_key.domain, entry_function_name, digest_utf8, issued_at, scheme); + let hashed_message = aptos_hash::keccak256(message); + let public_key_bytes = recover_public_key(&abstract_signature.signature, &hashed_message); + + // 1. Skip the 0x04 prefix (take the bytes after the first byte) + let public_key_without_prefix = vector::slice(&public_key_bytes, 1, vector::length(&public_key_bytes)); + // 2. 
Run Keccak256 on the public key (without the 0x04 prefix) + let kexHash = aptos_hash::keccak256(public_key_without_prefix); + // 3. Slice the last 20 bytes (this is the Ethereum address) + let recovered_addr = vector::slice(&kexHash, 12, 32); + // 4. Remove the 0x prefix from the utf8 account address + let ethereum_address_without_prefix = vector::slice(&abstract_public_key.ethereum_address, 2, vector::length(&abstract_public_key.ethereum_address)); + + let account_address_vec = base16_utf8_to_vec_u8(ethereum_address_without_prefix); + // Verify that the recovered address matches the domain account identity + assert!(recovered_addr == account_address_vec, EADDR_MISMATCH); + } + + /// Authorization function for domain account abstraction. + public fun authenticate(account: signer, aa_auth_data: AbstractionAuthData): signer { + let maybe_entry_function_payload = transaction_context::entry_function_payload(); + if (maybe_entry_function_payload.is_some()) { + let entry_function_payload = maybe_entry_function_payload.destroy_some(); + let entry_function_name = entry_function_name(&entry_function_payload); + authenticate_auth_data(aa_auth_data, &entry_function_name); + account + } else { + abort(EMISSING_ENTRY_FUNCTION_PAYLOAD) + } + } + + #[test_only] + use std::bcs; + #[test_only] + use std::string::utf8; + #[test_only] + use aptos_framework::auth_data::{create_derivable_auth_data}; + #[test_only] + fun create_abstract_public_key(ethereum_address: vector, domain: vector): vector { + let abstract_public_key = SIWEAbstractPublicKey { + ethereum_address, + domain, + }; + bcs::to_bytes(&abstract_public_key) + } + + #[test_only] + fun create_raw_signature(scheme: String, issued_at: String, signature: vector): vector { + let abstract_signature = SIWEAbstractSignature::MessageV2 { scheme, issued_at, signature }; + bcs::to_bytes(&abstract_signature) + } + + #[test] + fun test_deserialize_abstract_public_key() { + let ethereum_address = b"0xC7B576Ead6aFb962E2DEcB35814FB29723AEC98a"; + let domain = b"localhost:3001"; + let abstract_public_key = create_abstract_public_key(ethereum_address, domain); + let abstract_public_key = deserialize_abstract_public_key(&abstract_public_key); + assert!(abstract_public_key.ethereum_address == ethereum_address); + assert!(abstract_public_key.domain == domain); + } + + #[test] + fun test_deserialize_abstract_signature_with_https() { + let signature_bytes = vector[ + 249, 247, 194, 250, 31, 233, 100, 234, 109, 142, 6, 193, 203, 33, 147, 199, + 236, 117, 69, 119, 252, 219, 150, 143, 28, 112, 33, 9, 95, 53, 0, 69, + 123, 17, 207, 53, 69, 203, 213, 208, 13, 98, 225, 170, 28, 183, 181, 53, + 58, 209, 105, 56, 204, 253, 73, 82, 201, 197, 201, 139, 201, 19, 65, 215, + 28 + ]; + let abstract_signature = create_raw_signature(utf8(b"https"), utf8(b"2025-01-01T00:00:00.000Z"), signature_bytes); + let siwe_abstract_signature = deserialize_abstract_signature(&abstract_signature); + assert!(siwe_abstract_signature is SIWEAbstractSignature::MessageV2); + match (siwe_abstract_signature) { + SIWEAbstractSignature::MessageV1 { signature, issued_at } => { + assert!(issued_at == utf8(b"2025-01-01T00:00:00.000Z")); + assert!(signature == signature_bytes); + }, + SIWEAbstractSignature::MessageV2 { signature, issued_at, scheme } => { + assert!(scheme == utf8(b"https")); + assert!(issued_at == utf8(b"2025-01-01T00:00:00.000Z")); + assert!(signature == signature_bytes); + }, + }; + } + + #[test] + fun test_deserialize_abstract_signature_with_http() { + let signature_bytes = vector[ + 1, 252, 
18, 58, 243, 10, 152, 94, 33, 5, 76, 133, 39, 188, 25, 92, + 242, 39, 32, 84, 181, 94, 231, 9, 49, 141, 131, 20, 108, 93, 76, 144, + 47, 20, 83, 177, 107, 22, 148, 93, 191, 165, 86, 42, 181, 226, 116, 136, + 133, 84, 35, 222, 24, 36, 176, 143, 15, 14, 182, 135, 153, 141, 238, 238, + 28 + ]; + let abstract_signature = create_raw_signature(utf8(b"http"), utf8(b"2025-05-08T23:39:00.000Z"), signature_bytes); + let siwe_abstract_signature = deserialize_abstract_signature(&abstract_signature); + assert!(siwe_abstract_signature is SIWEAbstractSignature::MessageV2); + match (siwe_abstract_signature) { + SIWEAbstractSignature::MessageV1 { signature, issued_at } => { + assert!(issued_at == utf8(b"2025-05-08T23:39:00.000Z")); + assert!(signature == signature_bytes); + }, + SIWEAbstractSignature::MessageV2 { signature, issued_at, scheme } => { + assert!(scheme == utf8(b"http")); + assert!(issued_at == utf8(b"2025-05-08T23:39:00.000Z")); + assert!(signature == signature_bytes); + }, + }; + } + + #[test(framework = @0x1)] + fun test_construct_message(framework: &signer) { + chain_id::initialize_for_test(framework, 4); + + let ethereum_address = b"0xC7B576Ead6aFb962E2DEcB35814FB29723AEC98a"; + let domain = b"localhost:3001"; + let entry_function_name = b"0x1::aptos_account::transfer"; + let digest_utf8 = b"0x2a2f07c32382a94aa90ddfdb97076b77d779656bb9730c4f3e4d22a30df298dd"; + let issued_at = b"2025-01-01T00:00:00.000Z"; + let scheme = b"https"; + let message = construct_message(ðereum_address, &domain, &entry_function_name, &digest_utf8, &issued_at, &scheme); + let expected_message = b"\x19Ethereum Signed Message:\n442localhost:3001 wants you to sign in with your Ethereum account:\n0xC7B576Ead6aFb962E2DEcB35814FB29723AEC98a\n\nPlease confirm you explicitly initiated this request from localhost:3001. 
You are approving to execute transaction 0x1::aptos_account::transfer on Aptos blockchain (local).\n\nURI: https://localhost:3001\nVersion: 1\nChain ID: 4\nNonce: 0x2a2f07c32382a94aa90ddfdb97076b77d779656bb9730c4f3e4d22a30df298dd\nIssued At: 2025-01-01T00:00:00.000Z"; + assert!(message == expected_message); + } + + #[test(framework = @0x1)] + fun test_recover_public_key(framework: &signer) { + chain_id::initialize_for_test(framework, 4); + let ethereum_address = b"0xC7B576Ead6aFb962E2DEcB35814FB29723AEC98a"; + let domain = b"localhost:3001"; + let entry_function_name = b"0x1::aptos_account::transfer"; + let digest = b"0x705f1f57dd8399bf134e649981af43b5c42e59f985c4e4335ab70ce3f96bcd27"; + let issued_at = b"2025-05-02T16:17:10.714Z"; + let scheme = b"https"; + let message = construct_message(ðereum_address, &domain, &entry_function_name, &digest, &issued_at, &scheme); + let hashed_message = aptos_hash::keccak256(message); + let signature_bytes = vector[ + 162, 57, 230, 98, 9, 139, 202, 15, 110, 61, 237, 54, 252, 234, 202, 13, + 181, 196, 174, 19, 226, 50, 151, 63, 137, 229, 144, 15, 4, 56, 1, 122, + 42, 51, 191, 43, 162, 155, 55, 227, 62, 164, 247, 18, 154, 68, 59, 82, + 108, 124, 83, 72, 224, 158, 79, 20, 123, 172, 105, 71, 12, 114, 208, 246, 27 + ]; + let base64_public_key = recover_public_key(&signature_bytes, &hashed_message); + assert!(base64_public_key == vector[ + 4, 186, 242, 201, 107, 125, 171, 241, 239, 174, 216, 103, 198, 245, 151, 84, + 208, 238, 134, 130, 51, 223, 164, 243, 149, 234, 188, 140, 237, 189, 190, 221, + 95, 60, 172, 1, 22, 96, 232, 105, 172, 184, 198, 168, 157, 54, 230, 217, + 100, 150, 220, 31, 135, 165, 51, 83, 53, 159, 139, 98, 103, 106, 250, 194, 94 + ] + ); + } + + #[test(framework = @0x1)] + fun test_authenticate_auth_data(framework: &signer) { + chain_id::initialize_for_test(framework, 4); + + let digest = x"705f1f57dd8399bf134e649981af43b5c42e59f985c4e4335ab70ce3f96bcd27"; + let signature = vector[ + 162, 57, 230, 98, 9, 139, 202, 15, 110, 61, 237, 54, 252, 234, 202, 13, + 181, 196, 174, 19, 226, 50, 151, 63, 137, 229, 144, 15, 4, 56, 1, 122, + 42, 51, 191, 43, 162, 155, 55, 227, 62, 164, 247, 18, 154, 68, 59, 82, + 108, 124, 83, 72, 224, 158, 79, 20, 123, 172, 105, 71, 12, 114, 208, 246, 27 + ]; + let abstract_signature = create_raw_signature(utf8(b"https"), utf8(b"2025-05-02T16:17:10.714Z"), signature); + let ethereum_address = b"0xC7B576Ead6aFb962E2DEcB35814FB29723AEC98a"; + let domain = b"localhost:3001"; + let abstract_public_key = create_abstract_public_key(ethereum_address, domain); + let auth_data = create_derivable_auth_data(digest, abstract_signature, abstract_public_key); + let entry_function_name = b"0x1::aptos_account::transfer"; + authenticate_auth_data(auth_data, &entry_function_name); + } + + #[test(framework = @0x1)] + #[expected_failure(abort_code = EINVALID_SIGNATURE)] + fun test_authenticate_auth_data_invalid_signature(framework: &signer) { + chain_id::initialize_for_test(framework, 4); + + let digest = x"2a2f07c32382a94aa90ddfdb97076b77d779656bb9730c4f3e4d22a30df298dd"; + let signature = vector[ + 248, 247, 194, 250, 31, 233, 100, 234, 109, 142, 6, 193, 203, 33, 147, 199, + 236, 117, 69, 119, 252, 219, 150, 143, 28, 112, 33, 9, 95, 53, 0, 69, + 123, 17, 207, 53, 69, 203, 213, 208, 13, 98, 225, 170, 28, 183, 181, 53, + 58, 209, 105, 56, 204, 253, 73, 82, 201, 197, 201, 139, 201, 19, 65, 215, + 28 + ]; + let abstract_signature = create_raw_signature(utf8(b"https"), utf8(b"2025-01-01T00:00:00.000Z"), signature); + let ethereum_address = 
b"0xC7B576Ead6aFb962E2DEcB35814FB29723AEC98a"; + let domain = b"localhost:3001"; + let abstract_public_key = create_abstract_public_key(ethereum_address, domain); + let auth_data = create_derivable_auth_data(digest, abstract_signature, abstract_public_key); + let entry_function_name = b"0x1::aptos_account::transfer"; + authenticate_auth_data(auth_data, &entry_function_name); + } +} diff --git a/aptos-move/framework/aptos-framework/sources/account/common_account_abstractions/solana_derivable_account.move b/aptos-move/framework/aptos-framework/sources/account/common_account_abstractions/solana_derivable_account.move new file mode 100644 index 0000000000000..972a095a173b0 --- /dev/null +++ b/aptos-move/framework/aptos-framework/sources/account/common_account_abstractions/solana_derivable_account.move @@ -0,0 +1,313 @@ +/// Derivable account abstraction that verifies a message signed by +/// SIWS. +/// 1. The message format is as follows: +/// +/// wants you to sign in with your Solana account: +/// +/// +/// Please confirm you explicitly initiated this request from . You are approving to execute transaction on Aptos blockchain (). +/// +/// Nonce: +/// +/// 2. The abstract public key is a BCS serialized `SIWSAbstractPublicKey`. +/// 3. The abstract signature is a BCS serialized `SIWSAbstractSignature`. +/// 4. This module has been tested for the following wallets: +/// - Phantom +/// - Solflare +/// - Backpack +/// - OKX +module aptos_framework::solana_derivable_account { + use aptos_framework::auth_data::AbstractionAuthData; + use aptos_framework::common_account_abstractions_utils::{network_name, entry_function_name}; + use aptos_std::ed25519::{ + Self, + new_signature_from_bytes, + new_validated_public_key_from_bytes, + public_key_into_unvalidated, + }; + use std::bcs_stream::{Self, deserialize_u8}; + use std::string_utils; + use std::transaction_context; + use std::vector; + + /// Signature failed to verify. + const EINVALID_SIGNATURE: u64 = 1; + /// Non base58 character found in public key. + const EINVALID_BASE_58_PUBLIC_KEY: u64 = 2; + /// Entry function payload is missing. + const EMISSING_ENTRY_FUNCTION_PAYLOAD: u64 = 3; + /// Invalid signature type. + const EINVALID_SIGNATURE_TYPE: u64 = 4; + /// Invalid public key. + const EINVALID_PUBLIC_KEY: u64 = 5; + /// Invalid public key length. + const EINVALID_PUBLIC_KEY_LENGTH: u64 = 6; + + // a 58-character alphabet consisting of numbers (1-9) and almost all (A-Z, a-z) letters, + // excluding 0, O, I, and l to avoid confusion between similar-looking characters. + const BASE_58_ALPHABET: vector = b"123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz"; + const HEX_ALPHABET: vector = b"0123456789abcdef"; + const PUBLIC_KEY_NUM_BYTES: u64 = 32; + + enum SIWSAbstractSignature has drop { + MessageV1 { + signature: vector, + }, + } + + /// Deserializes the abstract public key which is supposed to be a bcs + /// serialized `SIWSAbstractPublicKey`. The base58_public_key is + /// represented in UTF8. We prefer this format because it's computationally + /// cheaper to decode a base58 string than to encode from raw bytes. We + /// require both the base58 public key in UTF8 to construct the message and + /// the raw bytes version to do signature verification. 
+ fun deserialize_abstract_public_key(abstract_public_key: &vector): + (vector, vector) { + let stream = bcs_stream::new(*abstract_public_key); + let base58_public_key = bcs_stream::deserialize_vector(&mut stream, |x| deserialize_u8(x)); + let domain = bcs_stream::deserialize_vector(&mut stream, |x| deserialize_u8(x)); + (base58_public_key, domain) + } + + /// Returns a tuple of the signature type and the signature. + fun deserialize_abstract_signature(abstract_signature: &vector): SIWSAbstractSignature { + let stream = bcs_stream::new(*abstract_signature); + let signature_type = bcs_stream::deserialize_u8(&mut stream); + if (signature_type == 0x00) { + let signature = bcs_stream::deserialize_vector(&mut stream, |x| deserialize_u8(x)); + SIWSAbstractSignature::MessageV1 { signature } + } else { + abort(EINVALID_SIGNATURE_TYPE) + } + } + + fun construct_message( + base58_public_key: &vector, + domain: &vector, + entry_function_name: &vector, + digest_utf8: &vector, + ): vector { + let message = &mut vector[]; + message.append(*domain); + message.append(b" wants you to sign in with your Solana account:\n"); + message.append(*base58_public_key); + message.append(b"\n\nPlease confirm you explicitly initiated this request from "); + message.append(*domain); + message.append(b"."); + message.append(b" You are approving to execute transaction "); + message.append(*entry_function_name); + message.append(b" on Aptos blockchain"); + let network_name = network_name(); + message.append(b" ("); + message.append(network_name); + message.append(b")"); + message.append(b"."); + message.append(b"\n\nNonce: "); + message.append(*digest_utf8); + *message + } + + spec to_public_key_bytes { + ensures result.length() == PUBLIC_KEY_NUM_BYTES; + } + + fun to_public_key_bytes(base58_public_key: &vector): vector { + let bytes = vector[0u8]; + let base = 58u16; + + let i = 0; + while (i < base58_public_key.length()) { + let char = base58_public_key[i]; + let (found, char_index) = BASE_58_ALPHABET.index_of(&char); + assert!(found, EINVALID_BASE_58_PUBLIC_KEY); + + let j = 0; + let carry = (char_index as u16); + + // For each existing byte, multiply by 58 and add carry + while (j < bytes.length()) { + let current = (bytes[j] as u16); + let new_carry = current * base + carry; + bytes[j] = ((new_carry & 0xff) as u8); + carry = new_carry >> 8; + j = j + 1; + }; + + // Add any remaining carry as new bytes + while (carry > 0) { + bytes.push_back((carry & 0xff) as u8); + carry = carry >> 8; + }; + + i = i + 1; + }; + + // Handle leading zeros (1's in Base58) + let i = 0; + while (i < base58_public_key.length() && base58_public_key[i] == 49) { // '1' is 49 in ASCII + bytes.push_back(0); + i = i + 1; + }; + + vector::reverse(&mut bytes); + assert!(bytes.length() == PUBLIC_KEY_NUM_BYTES, EINVALID_PUBLIC_KEY_LENGTH); + bytes + } + + spec authenticate_auth_data { + // TODO: Issue with `cannot appear in both arithmetic and bitwise + // operation` + pragma verify = false; + } + + fun authenticate_auth_data( + aa_auth_data: AbstractionAuthData, + entry_function_name: &vector + ) { + let abstract_public_key = aa_auth_data.derivable_abstract_public_key(); + let (base58_public_key, domain) = deserialize_abstract_public_key(abstract_public_key); + let digest_utf8 = string_utils::to_string(aa_auth_data.digest()).bytes(); + + let public_key_bytes = to_public_key_bytes(&base58_public_key); + let public_key = new_validated_public_key_from_bytes(public_key_bytes); + assert!(public_key.is_some(), EINVALID_PUBLIC_KEY); + let 
abstract_signature = deserialize_abstract_signature(aa_auth_data.derivable_abstract_signature()); + match (abstract_signature) { + SIWSAbstractSignature::MessageV1 { signature: signature_bytes } => { + let message = construct_message(&base58_public_key, &domain, entry_function_name, digest_utf8); + + let signature = new_signature_from_bytes(signature_bytes); + assert!( + ed25519::signature_verify_strict( + &signature, + &public_key_into_unvalidated(public_key.destroy_some()), + message, + ), + EINVALID_SIGNATURE + ); + }, + }; + } + + spec authenticate { + // TODO: Issue with spec for authenticate_auth_data + pragma verify = false; + } + + /// Authorization function for domain account abstraction. + public fun authenticate(account: signer, aa_auth_data: AbstractionAuthData): signer { + let maybe_entry_function_payload = transaction_context::entry_function_payload(); + if (maybe_entry_function_payload.is_some()) { + let entry_function_payload = maybe_entry_function_payload.destroy_some(); + let entry_function_name = entry_function_name(&entry_function_payload); + authenticate_auth_data(aa_auth_data, &entry_function_name); + account + } else { + abort(EMISSING_ENTRY_FUNCTION_PAYLOAD) + } + } + + #[test_only] + use std::bcs; + #[test_only] + use std::string::{String, utf8}; + #[test_only] + use aptos_framework::auth_data::{create_derivable_auth_data}; + #[test_only] + use std::chain_id; + + #[test_only] + struct SIWSAbstractPublicKey has drop { + base58_public_key: String, + domain: String, + } + + #[test_only] + fun create_abstract_public_key(base58_public_key: String, domain: String): vector { + let abstract_public_key = SIWSAbstractPublicKey { + base58_public_key, + domain, + }; + bcs::to_bytes(&abstract_public_key) + } + + #[test_only] + fun create_message_v1_signature(signature: vector): vector { + let abstract_signature = SIWSAbstractSignature::MessageV1 { signature }; + bcs::to_bytes(&abstract_signature) + } + + #[test] + fun test_deserialize_abstract_public_key() { + let base58_public_key = b"G56zT1K6AQab7FzwHdQ8hiHXusR14Rmddw6Vz5MFbbmV"; + let domain = b"aptos-labs.github.io"; + let abstract_public_key = create_abstract_public_key(utf8(base58_public_key), utf8(domain)); + let (public_key, domain) = deserialize_abstract_public_key(&abstract_public_key); + assert!(public_key == base58_public_key); + assert!(domain == domain); + } + + #[test] + fun test_deserialize_abstract_signature() { + let signature_bytes = vector[129, 0, 6, 135, 53, 153, 88, 201, 243, 227, 13, 232, 192, 42, 167, 94, 3, 120, 49, 80, 102, 193, 61, 211, 189, 83, 37, 121, 5, 216, 30, 25, 243, 207, 172, 248, 94, 201, 123, 66, 237, 66, 122, 201, 171, 215, 162, 187, 218, 188, 24, 165, 52, 147, 210, 39, 128, 78, 62, 81, 73, 167, 235, 1]; + let abstract_signature = create_message_v1_signature(signature_bytes); + let siws_abstract_signature = deserialize_abstract_signature(&abstract_signature); + assert!(siws_abstract_signature is SIWSAbstractSignature::MessageV1); + match (siws_abstract_signature) { + SIWSAbstractSignature::MessageV1 { signature } => assert!(signature == signature_bytes), + }; + } + + #[test(framework = @0x1)] + fun test_construct_message(framework: &signer) { + chain_id::initialize_for_test(framework, 2); + + let base58_public_key = b"G56zT1K6AQab7FzwHdQ8hiHXusR14Rmddw6Vz5MFbbmV"; + let domain = b"localhost:3000"; + let entry_function_name = b"0x1::coin::transfer"; + let digest_utf8 = b"0x9509edc861070b2848d8161c9453159139f867745dc87d32864a71e796c7d279"; + let message = 
construct_message(&base58_public_key, &domain, &entry_function_name, &digest_utf8); + assert!(message == b"localhost:3000 wants you to sign in with your Solana account:\nG56zT1K6AQab7FzwHdQ8hiHXusR14Rmddw6Vz5MFbbmV\n\nPlease confirm you explicitly initiated this request from localhost:3000. You are approving to execute transaction 0x1::coin::transfer on Aptos blockchain (testnet).\n\nNonce: 0x9509edc861070b2848d8161c9453159139f867745dc87d32864a71e796c7d279"); + } + + #[test] + fun test_to_public_key_bytes() { + let base58_public_key = b"G56zT1K6AQab7FzwHdQ8hiHXusR14Rmddw6Vz5MFbbmV"; + let base64_public_key = to_public_key_bytes(&base58_public_key); + + assert!(base64_public_key == vector[223, 236, 102, 141, 171, 166, 118, + 40, 172, 65, 89, 139, 197, 164, 172, 50, 133, 204, 100, 93, 136, 195, + 58, 158, 31, 22, 219, 93, 60, 40, 175, 12]); + } + + #[test(framework = @0x1)] + fun test_authenticate_auth_data(framework: &signer) { + chain_id::initialize_for_test(framework, 4); + + let digest = x"9800ae3d949260dedd01573b2903e9de06abe914530ba5d21f068f8823bfdfa3"; + let signature = vector[70, 135, 9, 250, 23, 189, 162, 119, 77, 133, 195, 66, 102, 105, 116, 86, 29, 118, 226, 100, 94, 120, 138, 219, 252, 134, 231, 139, 47, 77, 19, 201, 4, 88, 255, 64, 185, 96, 134, 50, 27, 30, 110, 125, 251, 89, 57, 156, 17, 170, 16, 102, 107, 40, 46, 234, 15, 162, 156, 69, 132, 70, 135, 11]; + let abstract_signature = create_message_v1_signature(signature); + let base58_public_key = b"Awrh7Cfvx5gc7Ua93hdmmni6KWvkJgH4HwMkixTxmxe"; + let domain = b"localhost:3001"; + let abstract_public_key = create_abstract_public_key(utf8(base58_public_key), utf8(domain)); + let auth_data = create_derivable_auth_data(digest, abstract_signature, abstract_public_key); + let entry_function_name = b"0x1::aptos_account::transfer"; + authenticate_auth_data(auth_data, &entry_function_name); + } + + #[test(framework = @0x1)] + #[expected_failure(abort_code = EINVALID_SIGNATURE)] + fun test_authenticate_auth_data_invalid_signature(framework: &signer) { + chain_id::initialize_for_test(framework, 4); + + let digest = x"9800ae3d949260dedd01573b2903e9de06abe914530ba5d21f068f8823bfdfa3"; + let signature = vector[71, 135, 9, 250, 23, 189, 162, 119, 77, 133, 195, 66, 102, 105, 116, 86, 29, 118, 226, 100, 94, 120, 138, 219, 252, 134, 231, 139, 47, 77, 19, 201, 4, 88, 255, 64, 185, 96, 134, 50, 27, 30, 110, 125, 251, 89, 57, 156, 17, 170, 16, 102, 107, 40, 46, 234, 15, 162, 156, 69, 132, 70, 135, 11]; + let abstract_signature = create_message_v1_signature(signature); + let base58_public_key = b"Awrh7Cfvx5gc7Ua93hdmmni6KWvkJgH4HwMkixTxmxe"; + let domain = b"localhost:3001"; + let abstract_public_key = create_abstract_public_key(utf8(base58_public_key), utf8(domain)); + let auth_data = create_derivable_auth_data(digest, abstract_signature, abstract_public_key); + let entry_function_name = b"0x1::aptos_account::transfer"; + authenticate_auth_data(auth_data, &entry_function_name); + } +} diff --git a/aptos-move/framework/aptos-framework/sources/account/permissioned_delegation.move b/aptos-move/framework/aptos-framework/sources/account/permissioned_delegation.move new file mode 100644 index 0000000000000..c286870ac991a --- /dev/null +++ b/aptos-move/framework/aptos-framework/sources/account/permissioned_delegation.move @@ -0,0 +1,210 @@ +module aptos_framework::permissioned_delegation { + use std::error; + use std::option::Option; + use std::signer; + use aptos_std::ed25519::{ + Self, + new_signature_from_bytes, + new_unvalidated_public_key_from_bytes, 
+ UnvalidatedPublicKey + }; + use aptos_std::big_ordered_map::{Self, BigOrderedMap}; + use aptos_framework::auth_data::{Self, AbstractionAuthData}; + use aptos_framework::bcs_stream::{Self, deserialize_u8}; + use aptos_framework::permissioned_signer::{Self, is_permissioned_signer, StorablePermissionedHandle}; + use aptos_framework::rate_limiter; + use aptos_framework::rate_limiter::RateLimiter; + #[test_only] + use std::bcs; + #[test_only] + use std::option; + + const ENOT_MASTER_SIGNER: u64 = 1; + const EINVALID_PUBLIC_KEY: u64 = 2; + const EPUBLIC_KEY_NOT_FOUND: u64 = 3; + const EINVALID_SIGNATURE: u64 = 4; + const EDELEGATION_EXISTENCE: u64 = 5; + const ERATE_LIMITED: u64 = 6; + + enum AccountDelegation has store { + V1 { handle: StorablePermissionedHandle, rate_limiter: Option } + } + + enum DelegationKey has copy, store, drop { + Ed25519PublicKey(UnvalidatedPublicKey) + } + + public fun gen_ed25519_key(key: UnvalidatedPublicKey): DelegationKey { + DelegationKey::Ed25519PublicKey(key) + } + + struct RegisteredDelegations has key { + delegations: BigOrderedMap + } + + inline fun check_txn_rate(bundle: &mut AccountDelegation, check_rate_limit: bool) { + let token_bucket = &mut bundle.rate_limiter; + if (check_rate_limit && token_bucket.is_some()) { + assert!(rate_limiter::request(token_bucket.borrow_mut(), 1), std::error::permission_denied(ERATE_LIMITED)); + }; + } + + public fun add_permissioned_handle( + master: &signer, + key: DelegationKey, + rate_limiter: Option, + expiration_time: u64, + ): signer acquires RegisteredDelegations { + assert!(!is_permissioned_signer(master), error::permission_denied(ENOT_MASTER_SIGNER)); + let addr = signer::address_of(master); + if (!exists(addr)) { + move_to(master, RegisteredDelegations { + delegations: big_ordered_map::new_with_config(50, 20, false) + }); + }; + let handles = &mut RegisteredDelegations[addr].delegations; + assert!(!handles.contains(&key), error::already_exists(EDELEGATION_EXISTENCE)); + let handle = permissioned_signer::create_storable_permissioned_handle(master, expiration_time); + let permissioned_signer = permissioned_signer::signer_from_storable_permissioned_handle(&handle); + handles.add(key, AccountDelegation::V1 { handle, rate_limiter }); + permissioned_signer + } + + public fun remove_permissioned_handle( + master: &signer, + key: DelegationKey, + ) acquires RegisteredDelegations { + assert!(!is_permissioned_signer(master), error::permission_denied(ENOT_MASTER_SIGNER)); + let addr = signer::address_of(master); + let delegations = &mut RegisteredDelegations[addr].delegations; + assert!(delegations.contains(&key), error::not_found(EDELEGATION_EXISTENCE)); + let delegation = delegations.remove(&key); + match (delegation) { + AccountDelegation::V1 { handle, rate_limiter: _ } => { + permissioned_signer::destroy_storable_permissioned_handle(handle); + } + }; + } + + public fun permissioned_signer_by_key( + master: &signer, + key: DelegationKey, + ): signer acquires RegisteredDelegations { + assert!(!is_permissioned_signer(master), error::permission_denied(ENOT_MASTER_SIGNER)); + let addr = signer::address_of(master); + let handle = get_storable_permissioned_handle(addr, key, false); + permissioned_signer::signer_from_storable_permissioned_handle(handle) + } + + public fun handle_address_by_key(master: address, key: DelegationKey): address acquires RegisteredDelegations { + let handle = get_storable_permissioned_handle(master, key, false); + permissioned_signer::permissions_storage_address(handle) + } + + /// Authorization 
function for account abstraction. + public fun authenticate( + account: signer, + abstraction_auth_data: AbstractionAuthData + ): signer acquires RegisteredDelegations { + let addr = signer::address_of(&account); + let stream = bcs_stream::new(*auth_data::authenticator(&abstraction_auth_data)); + let public_key = new_unvalidated_public_key_from_bytes( + bcs_stream::deserialize_vector(&mut stream, |x| deserialize_u8(x)) + ); + let signature = new_signature_from_bytes( + bcs_stream::deserialize_vector(&mut stream, |x| deserialize_u8(x)) + ); + assert!( + ed25519::signature_verify_strict( + &signature, + &public_key, + *auth_data::digest(&abstraction_auth_data), + ), + error::permission_denied(EINVALID_SIGNATURE) + ); + let handle = get_storable_permissioned_handle(addr, DelegationKey::Ed25519PublicKey(public_key), true); + permissioned_signer::signer_from_storable_permissioned_handle(handle) + } + + inline fun get_storable_permissioned_handle( + master: address, + key: DelegationKey, + count_rate: bool + ): &StorablePermissionedHandle { + if (exists(master)) { + let delegations = &mut RegisteredDelegations[master].delegations; + if (delegations.contains(&key)) { + let delegation = delegations.remove(&key); + check_txn_rate(&mut delegation, count_rate); + delegations.add(key, delegation); + &delegations.borrow(&key).handle + } else { + abort error::permission_denied(EINVALID_SIGNATURE) + } + } else { + abort error::permission_denied(EINVALID_SIGNATURE) + } + } + + /// + spec module { + // TODO: fix verification + pragma verify = false; + } + + #[test_only] + use aptos_std::ed25519::{sign_arbitrary_bytes, generate_keys, validated_public_key_to_bytes, Signature, + public_key_into_unvalidated + }; + #[test_only] + use aptos_framework::account::create_signer_for_test; + #[test_only] + use aptos_framework::timestamp; + + #[test_only] + struct SignatureBundle has drop { + pubkey: UnvalidatedPublicKey, + signature: Signature, + } + + #[test(account = @0xcafe, account_copy = @0xcafe)] + fun test_basics(account: signer, account_copy: signer) acquires RegisteredDelegations { + let aptos_framework = create_signer_for_test(@aptos_framework); + timestamp::set_time_has_started_for_testing(&aptos_framework); + let (sk, vpk) = generate_keys(); + let signature = sign_arbitrary_bytes(&sk, vector[1, 2, 3]); + let pubkey_bytes = validated_public_key_to_bytes(&vpk); + let key = DelegationKey::Ed25519PublicKey(public_key_into_unvalidated(vpk)); + let sig_bundle = SignatureBundle { + pubkey: new_unvalidated_public_key_from_bytes(pubkey_bytes), + signature, + }; + let auth_data = auth_data::create_auth_data(vector[1, 2, 3], bcs::to_bytes(&sig_bundle)); + assert!(!is_permissioned_signer(&account)); + add_permissioned_handle(&account, key, option::none(), 60); + let permissioned_signer = authenticate(account, auth_data); + assert!(is_permissioned_signer(&permissioned_signer)); + remove_permissioned_handle(&account_copy, key); + } + + #[test(account = @0xcafe, account_copy = @0xcafe, account_copy_2 = @0xcafe)] + #[expected_failure(abort_code = 0x50006, location = Self)] + fun test_rate_limit(account: signer, account_copy: signer, account_copy_2: signer) acquires RegisteredDelegations { + let aptos_framework = create_signer_for_test(@aptos_framework); + timestamp::set_time_has_started_for_testing(&aptos_framework); + let (sk, vpk) = generate_keys(); + let signature = sign_arbitrary_bytes(&sk, vector[1, 2, 3]); + let pubkey_bytes = validated_public_key_to_bytes(&vpk); + let key = 
DelegationKey::Ed25519PublicKey(public_key_into_unvalidated(vpk)); + let sig_bundle = SignatureBundle { + pubkey: new_unvalidated_public_key_from_bytes(pubkey_bytes), + signature, + }; + let auth_data = auth_data::create_auth_data(vector[1, 2, 3], bcs::to_bytes(&sig_bundle)); + assert!(!is_permissioned_signer(&account)); + add_permissioned_handle(&account, key, option::some(rate_limiter::initialize(1, 10)), 60); + authenticate(account, auth_data); + authenticate(account_copy, auth_data); + remove_permissioned_handle(&account_copy_2, key); + } +} diff --git a/aptos-move/framework/aptos-framework/sources/account/rate_limiter.move b/aptos-move/framework/aptos-framework/sources/account/rate_limiter.move new file mode 100644 index 0000000000000..68c2ccc98501f --- /dev/null +++ b/aptos-move/framework/aptos-framework/sources/account/rate_limiter.move @@ -0,0 +1,172 @@ +module aptos_framework::rate_limiter { + use aptos_framework::timestamp; + + enum RateLimiter has key, store, copy, drop { + // Struct to represent a Token Bucket that refills every minute + TokenBucket { + // Maximum number of tokens allowed at any time. + capacity: u64, + // Current number of tokens remaining in this interval. + current_amount: u64, + // refill `capacity` number of tokens every `refill_interval` in seconds. + refill_interval: u64, + // Last time the bucket was refilled (in seconds) + last_refill_timestamp: u64, + // accumulated amount that hasn't yet added up to a full token + fractional_accumulated: u64, + } + } + + // Public entry function to initialize a Token Bucket based rate limiter. + public fun initialize(capacity: u64, refill_interval: u64): RateLimiter { + RateLimiter::TokenBucket { + capacity, + current_amount: capacity, // Start with a full bucket (full capacity of transactions allowed) + refill_interval, + last_refill_timestamp: timestamp::now_seconds(), + fractional_accumulated: 0, // Start with no fractional accumulated + } + } + + // Public function to request a transaction from the bucket + public fun request(limiter: &mut RateLimiter, num_token_requested: u64): bool { + refill(limiter); + if (limiter.current_amount >= num_token_requested) { + limiter.current_amount = limiter.current_amount - num_token_requested; + true + } else { + false + } + } + + // Function to refill the transactions in the bucket based on time passed + fun refill(limiter: &mut RateLimiter) { + let current_time = timestamp::now_seconds(); + let time_passed = current_time - limiter.last_refill_timestamp; + // Calculate the full tokens that can be added + let accumulated_amount = time_passed * limiter.capacity + limiter.fractional_accumulated; + let new_tokens = accumulated_amount / limiter.refill_interval; + if (limiter.current_amount + new_tokens >= limiter.capacity) { + limiter.current_amount = limiter.capacity; + limiter.fractional_accumulated = 0; + } else { + limiter.current_amount = limiter.current_amount + new_tokens; + // Update the fractional amount accumulated for the next refill cycle + limiter.fractional_accumulated = accumulated_amount % limiter.refill_interval; + }; + limiter.last_refill_timestamp = current_time; + } + + #[test(aptos_framework = @0x1)] + fun test_initialize_bucket(aptos_framework: &signer) { + timestamp::set_time_has_started_for_testing(aptos_framework); + let bucket = initialize(10, 60); + assert!(bucket.capacity == 10, 100); + assert!(bucket.current_amount == 10, 101); + assert!(bucket.refill_interval == 60, 102); + } + + #[test(aptos_framework = @0x1)] + fun 
test_request_success(aptos_framework: &signer) { + timestamp::set_time_has_started_for_testing(aptos_framework); + let bucket = initialize(10, 30); + let success = request(&mut bucket, 5); + assert!(success, 200); // Should succeed since 5 <= 10 + assert!(bucket.current_amount == 5, 201); // Remaining tokens should be 5 + } + + #[test(aptos_framework = @0x1)] + fun test_request_failure(aptos_framework: &signer) { + timestamp::set_time_has_started_for_testing(aptos_framework); + let bucket = initialize(10, 30); + let success = request(&mut bucket, 15); + assert!(!success, 300); // Should fail since 15 > 10 + assert!(bucket.current_amount == 10, 301); // Tokens should remain unchanged + } + + #[test(aptos_framework = @0x1)] + fun test_refill(aptos_framework: &signer) { + timestamp::set_time_has_started_for_testing(aptos_framework); + let bucket = initialize(10, 60); + + // Simulate a passage of 31 seconds + timestamp::update_global_time_for_test_secs(timestamp::now_seconds() + 31); + + // Refill the bucket + refill(&mut bucket); + + // Should have refilled 5 tokens (half of the capacity), + // but bucket was already full, so should remain full + assert!(bucket.current_amount == 10, 400); + assert!(bucket.fractional_accumulated == 0, 401); + + // Request 5 tokens + let success = request(&mut bucket, 5); + assert!(success, 401); // Request should succeed + assert!(bucket.current_amount == 5, 402); // Remaining tokens should be 5 + assert!(bucket.fractional_accumulated == 0, 403); + + // Simulate another passage of 23 seconds + timestamp::update_global_time_for_test_secs(timestamp::now_seconds() + 23); + + // Refill again + refill(&mut bucket); + + // Should refill 3 tokens + assert!(bucket.current_amount == 8, 403); + // and have 230-180 leftover + assert!(bucket.fractional_accumulated == 50, 404); + } + + #[test(aptos_framework= @0x1)] + fun test_fractional_accumulation(aptos_framework: &signer) { + timestamp::set_time_has_started_for_testing(aptos_framework); + let bucket = initialize(10, 60); + assert!(request(&mut bucket, 10), 1); // Request should succeed + + assert!(bucket.current_amount == 0, 500); // No token will be added since it rounds down + + // Simulate 10 seconds passing + timestamp::update_global_time_for_test_secs(timestamp::now_seconds() + 10); + + // Refill the bucket + refill(&mut bucket); + // Should add 1/6th of the tokens (because 10 seconds is 1/6th of a minute) + assert!(bucket.current_amount == 1, 500); // 1 token will be added since it rounds down + assert!(bucket.fractional_accumulated == 40, 501); // Accumulate the 4 seconds of fractional amount + + // Simulate another 50 seconds passing (total 60 seconds) + timestamp::update_global_time_for_test_secs(timestamp::now_seconds() + 50); + + // Refill the bucket again + refill(&mut bucket); + + assert!(bucket.current_amount == 10, 502); // Should be full now + assert!(bucket.fractional_accumulated == 0, 503); // Fractional time should reset + } + + #[test(aptos_framework= @0x1)] + fun test_multiple_refills(aptos_framework: &signer) { + timestamp::set_time_has_started_for_testing(aptos_framework); + let bucket = initialize(10, 60); + + // Request 8 tokens + let success = request(&mut bucket, 8); + assert!(success, 600); // Should succeed + assert!(bucket.current_amount == 2, 601); // Remaining tokens should be 2 + + // Simulate a passage of 30 seconds + timestamp::update_global_time_for_test_secs(timestamp::now_seconds() + 30); + + // Refill the bucket + refill(&mut bucket); + assert!(bucket.current_amount == 7, 602); // 
Should add 5 tokens (half of the refill rate) + + // Simulate another 30 seconds + timestamp::update_global_time_for_test_secs(timestamp::now_seconds() + 30); + + // Refill the bucket again + refill(&mut bucket); + assert!(bucket.current_amount == 10, 603); // Should be full again + } +} diff --git a/aptos-move/framework/aptos-framework/sources/aggregator/aggregator.spec.move b/aptos-move/framework/aptos-framework/sources/aggregator/aggregator.spec.move index 76f00feba3f7e..d29b6cb1bb8a6 100644 --- a/aptos-move/framework/aptos-framework/sources/aggregator/aggregator.spec.move +++ b/aptos-move/framework/aptos-framework/sources/aggregator/aggregator.spec.move @@ -59,10 +59,9 @@ spec aptos_framework::aggregator { } spec limit { - pragma opaque; + pragma intrinsic; /// [high-level-req-1.2] - aborts_if false; - ensures [abstract] result == spec_get_limit(aggregator); + aborts_if [abstract] false; } spec native fun spec_read(aggregator: Aggregator): u128; diff --git a/aptos-move/framework/aptos-framework/sources/aggregator/aggregator_factory.move b/aptos-move/framework/aptos-framework/sources/aggregator/aggregator_factory.move index 7ec4dad805e2a..c7192e646d900 100644 --- a/aptos-move/framework/aptos-framework/sources/aggregator/aggregator_factory.move +++ b/aptos-move/framework/aptos-framework/sources/aggregator/aggregator_factory.move @@ -16,6 +16,11 @@ module aptos_framework::aggregator_factory { /// Aggregator factory is not published yet. const EAGGREGATOR_FACTORY_NOT_FOUND: u64 = 1; + /// Aggregator V1 only supports limit == MAX_U128. + const EAGG_V1_LIMIT_DEPRECATED: u64 = 2; + + const MAX_U128: u128 = 340282366920938463463374607431768211455; + /// Creates new aggregators. Used to control the numbers of aggregators in the /// system and who can create them. At the moment, only Aptos Framework (0x1) /// account can. @@ -33,27 +38,39 @@ module aptos_framework::aggregator_factory { } /// Creates a new aggregator instance which overflows on exceeding a `limit`. - public(friend) fun create_aggregator_internal(limit: u128): Aggregator acquires AggregatorFactory { + public(friend) fun create_aggregator_internal(): Aggregator acquires AggregatorFactory { assert!( exists(@aptos_framework), error::not_found(EAGGREGATOR_FACTORY_NOT_FOUND) ); let aggregator_factory = borrow_global_mut(@aptos_framework); - new_aggregator(aggregator_factory, limit) + new_aggregator(aggregator_factory, MAX_U128) } + #[deprecated] /// This is currently a function closed for public. This can be updated in the future by on-chain governance /// to allow any signer to call. public fun create_aggregator(account: &signer, limit: u128): Aggregator acquires AggregatorFactory { + // deprecated. Currently used only in aptos-move/e2e-move-tests/src/tests/aggregator.data/pack/sources/aggregator_test.move + // Only Aptos Framework (0x1) account can call this for now. system_addresses::assert_aptos_framework(account); - create_aggregator_internal(limit) + assert!( + limit == MAX_U128, + error::invalid_argument(EAGG_V1_LIMIT_DEPRECATED) + ); + create_aggregator_internal() } /// Returns a new aggregator. 
native fun new_aggregator(aggregator_factory: &mut AggregatorFactory, limit: u128): Aggregator; + #[test_only] + public fun create_aggregator_for_test(): Aggregator acquires AggregatorFactory { + create_aggregator_internal() + } + #[test_only] public fun initialize_aggregator_factory_for_test(aptos_framework: &signer) { initialize_aggregator_factory(aptos_framework); diff --git a/aptos-move/framework/aptos-framework/sources/aggregator/aggregator_factory.spec.move b/aptos-move/framework/aptos-framework/sources/aggregator/aggregator_factory.spec.move index 9ffe7a78d2dde..7606e54d49dc0 100644 --- a/aptos-move/framework/aptos-framework/sources/aggregator/aggregator_factory.spec.move +++ b/aptos-move/framework/aptos-framework/sources/aggregator/aggregator_factory.spec.move @@ -54,10 +54,10 @@ spec aptos_framework::aggregator_factory { ensures exists(addr); } - spec create_aggregator_internal(limit: u128): Aggregator { + spec create_aggregator_internal(): Aggregator { /// [high-level-req-2] include CreateAggregatorInternalAbortsIf; - ensures aggregator::spec_get_limit(result) == limit; + ensures aggregator::spec_get_limit(result) == MAX_U128; ensures aggregator::spec_aggregator_get_val(result) == 0; } spec schema CreateAggregatorInternalAbortsIf { @@ -71,6 +71,7 @@ spec aptos_framework::aggregator_factory { let addr = signer::address_of(account); /// [high-level-req-3] aborts_if addr != @aptos_framework; + aborts_if limit != MAX_U128; aborts_if !exists(@aptos_framework); } diff --git a/aptos-move/framework/aptos-framework/sources/aggregator/optional_aggregator.move b/aptos-move/framework/aptos-framework/sources/aggregator/optional_aggregator.move index f3a545600edbc..a5d9794862c52 100644 --- a/aptos-move/framework/aptos-framework/sources/aggregator/optional_aggregator.move +++ b/aptos-move/framework/aptos-framework/sources/aggregator/optional_aggregator.move @@ -16,6 +16,11 @@ module aptos_framework::optional_aggregator { /// Aggregator feature is not supported. Raised by native code. const EAGGREGATOR_UNDERFLOW: u64 = 2; + /// OptionalAggregator (Agg V1) switch not supported any more. + const ESWITCH_DEPRECATED: u64 = 3; + + const MAX_U128: u128 = 340282366920938463463374607431768211455; + /// Wrapper around integer with a custom overflow limit. Supports add, subtract and read just like `Aggregator`. struct Integer has store { value: u128, @@ -69,61 +74,23 @@ module aptos_framework::optional_aggregator { } /// Creates a new optional aggregator. - public(friend) fun new(limit: u128, parallelizable: bool): OptionalAggregator { + public(friend) fun new(parallelizable: bool): OptionalAggregator { if (parallelizable) { OptionalAggregator { - aggregator: option::some(aggregator_factory::create_aggregator_internal(limit)), + aggregator: option::some(aggregator_factory::create_aggregator_internal()), integer: option::none(), } } else { OptionalAggregator { aggregator: option::none(), - integer: option::some(new_integer(limit)), + integer: option::some(new_integer(MAX_U128)), } } } /// Switches between parallelizable and non-parallelizable implementations. - public fun switch(optional_aggregator: &mut OptionalAggregator) { - let value = read(optional_aggregator); - switch_and_zero_out(optional_aggregator); - add(optional_aggregator, value); - } - - /// Switches between parallelizable and non-parallelizable implementations, setting - /// the value of the new optional aggregator to zero. 
- fun switch_and_zero_out(optional_aggregator: &mut OptionalAggregator) { - if (is_parallelizable(optional_aggregator)) { - switch_to_integer_and_zero_out(optional_aggregator); - } else { - switch_to_aggregator_and_zero_out(optional_aggregator); - } - } - - /// Switches from parallelizable to non-parallelizable implementation, zero-initializing - /// the value. - fun switch_to_integer_and_zero_out( - optional_aggregator: &mut OptionalAggregator - ): u128 { - let aggregator = option::extract(&mut optional_aggregator.aggregator); - let limit = aggregator::limit(&aggregator); - aggregator::destroy(aggregator); - let integer = new_integer(limit); - option::fill(&mut optional_aggregator.integer, integer); - limit - } - - /// Switches from non-parallelizable to parallelizable implementation, zero-initializing - /// the value. - fun switch_to_aggregator_and_zero_out( - optional_aggregator: &mut OptionalAggregator - ): u128 { - let integer = option::extract(&mut optional_aggregator.integer); - let limit = limit(&integer); - destroy_integer(integer); - let aggregator = aggregator_factory::create_aggregator_internal(limit); - option::fill(&mut optional_aggregator.aggregator, aggregator); - limit + public fun switch(_optional_aggregator: &mut OptionalAggregator) { + abort error::invalid_state(ESWITCH_DEPRECATED) } /// Destroys optional aggregator. @@ -192,10 +159,19 @@ module aptos_framework::optional_aggregator { } #[test(account = @aptos_framework)] - fun optional_aggregator_test(account: signer) { + #[expected_failure(abort_code = 0x030003, location = Self)] + fun optional_aggregator_swith_fail_test(account: signer) { + aggregator_factory::initialize_aggregator_factory(&account); + let aggregator = new(true); + switch(&mut aggregator); + destroy(aggregator); + } + + #[test(account = @aptos_framework)] + fun optional_aggregator_test_integer(account: signer) { aggregator_factory::initialize_aggregator_factory(&account); - let aggregator = new(30, false); + let aggregator = new(false); assert!(!is_parallelizable(&aggregator), 0); add(&mut aggregator, 12); @@ -205,9 +181,27 @@ module aptos_framework::optional_aggregator { sub(&mut aggregator, 10); assert!(read(&aggregator) == 5, 0); - // Switch to parallelizable aggregator and check the value is preserved. - switch(&mut aggregator); + add(&mut aggregator, 12); + add(&mut aggregator, 3); + assert!(read(&aggregator) == 20, 0); + + sub(&mut aggregator, 10); + assert!(read(&aggregator) == 10, 0); + + destroy(aggregator); + } + + #[test(account = @aptos_framework)] + fun optional_aggregator_test_aggregator(account: signer) { + aggregator_factory::initialize_aggregator_factory(&account); + let aggregator = new(true); assert!(is_parallelizable(&aggregator), 0); + + add(&mut aggregator, 12); + add(&mut aggregator, 3); + assert!(read(&aggregator) == 15, 0); + + sub(&mut aggregator, 10); assert!(read(&aggregator) == 5, 0); add(&mut aggregator, 12); @@ -217,11 +211,6 @@ module aptos_framework::optional_aggregator { sub(&mut aggregator, 10); assert!(read(&aggregator) == 10, 0); - // Switch back! 
- switch(&mut aggregator); - assert!(!is_parallelizable(&aggregator), 0); - assert!(read(&aggregator) == 10, 0); - destroy(aggregator); } @@ -229,24 +218,25 @@ module aptos_framework::optional_aggregator { fun optional_aggregator_destroy_test(account: signer) { aggregator_factory::initialize_aggregator_factory(&account); - let aggregator = new(30, false); + let aggregator = new(false); destroy(aggregator); - let aggregator = new(30, true); + let aggregator = new(true); destroy(aggregator); - let aggregator = new(12, false); - assert!(destroy_optional_integer(aggregator) == 12, 0); + let aggregator = new(false); + assert!(destroy_optional_integer(aggregator) == MAX_U128, 0); - let aggregator = new(21, true); - assert!(destroy_optional_aggregator(aggregator) == 21, 0); + let aggregator = new(true); + assert!(destroy_optional_aggregator(aggregator) == MAX_U128, 0); } #[test(account = @aptos_framework)] #[expected_failure(abort_code = 0x020001, location = Self)] fun non_parallelizable_aggregator_overflow_test(account: signer) { aggregator_factory::initialize_aggregator_factory(&account); - let aggregator = new(15, false); + let aggregator = new(false); + add(&mut aggregator, MAX_U128 - 15); // Overflow! add(&mut aggregator, 16); @@ -258,7 +248,7 @@ module aptos_framework::optional_aggregator { #[expected_failure(abort_code = 0x020002, location = Self)] fun non_parallelizable_aggregator_underflow_test(account: signer) { aggregator_factory::initialize_aggregator_factory(&account); - let aggregator = new(100, false); + let aggregator = new(false); // Underflow! sub(&mut aggregator, 100); @@ -271,7 +261,8 @@ module aptos_framework::optional_aggregator { #[expected_failure(abort_code = 0x020001, location = aptos_framework::aggregator)] fun parallelizable_aggregator_overflow_test(account: signer) { aggregator_factory::initialize_aggregator_factory(&account); - let aggregator = new(15, true); + let aggregator = new(true); + add(&mut aggregator, MAX_U128 - 15); // Overflow! add(&mut aggregator, 16); @@ -283,7 +274,7 @@ module aptos_framework::optional_aggregator { #[expected_failure(abort_code = 0x020002, location = aptos_framework::aggregator)] fun parallelizable_aggregator_underflow_test(account: signer) { aggregator_factory::initialize_aggregator_factory(&account); - let aggregator = new(100, true); + let aggregator = new(true); // Underflow! 
add(&mut aggregator, 99); diff --git a/aptos-move/framework/aptos-framework/sources/aggregator/optional_aggregator.spec.move b/aptos-move/framework/aptos-framework/sources/aggregator/optional_aggregator.spec.move index bb8e29768447f..4a86db69d0ce7 100644 --- a/aptos-move/framework/aptos-framework/sources/aggregator/optional_aggregator.spec.move +++ b/aptos-move/framework/aptos-framework/sources/aggregator/optional_aggregator.spec.move @@ -105,12 +105,8 @@ spec aptos_framework::optional_aggregator { (value > (option::borrow(optional_aggregator.integer).limit - option::borrow(optional_aggregator.integer).value)); } - spec switch(optional_aggregator: &mut OptionalAggregator) { - let vec_ref = optional_aggregator.integer.vec; - aborts_if is_parallelizable(optional_aggregator) && len(vec_ref) != 0; - aborts_if !is_parallelizable(optional_aggregator) && len(vec_ref) == 0; - aborts_if !is_parallelizable(optional_aggregator) && !exists(@aptos_framework); - ensures optional_aggregator_value(optional_aggregator) == optional_aggregator_value(old(optional_aggregator)); + spec switch(_optional_aggregator: &mut OptionalAggregator) { + aborts_if true; } spec sub_integer(integer: &mut Integer, value: u128) { @@ -118,7 +114,7 @@ spec aptos_framework::optional_aggregator { ensures integer.value == old(integer.value) - value; } - spec new(limit: u128, parallelizable: bool): OptionalAggregator { + spec new(parallelizable: bool): OptionalAggregator { aborts_if parallelizable && !exists(@aptos_framework); ensures parallelizable ==> is_parallelizable(result); ensures !parallelizable ==> !is_parallelizable(result); @@ -126,46 +122,6 @@ spec aptos_framework::optional_aggregator { ensures optional_aggregator_value(result) <= optional_aggregator_limit(result); } - /// Option does not exist When Option exists. - /// Option exists when Option does not exist. - /// The AggregatorFactory is under the @aptos_framework when Option does not exist. - spec switch_and_zero_out(optional_aggregator: &mut OptionalAggregator) { - let vec_ref = optional_aggregator.integer.vec; - aborts_if is_parallelizable(optional_aggregator) && len(vec_ref) != 0; - aborts_if !is_parallelizable(optional_aggregator) && len(vec_ref) == 0; - aborts_if !is_parallelizable(optional_aggregator) && !exists(@aptos_framework); - /// [high-level-req-3] - ensures is_parallelizable(old(optional_aggregator)) ==> !is_parallelizable(optional_aggregator); - ensures !is_parallelizable(old(optional_aggregator)) ==> is_parallelizable(optional_aggregator); - ensures optional_aggregator_value(optional_aggregator) == 0; - } - - /// The aggregator exists and the integer dosex not exist when Switches from parallelizable to non-parallelizable implementation. - spec switch_to_integer_and_zero_out( - optional_aggregator: &mut OptionalAggregator - ): u128 { - let limit = aggregator::spec_get_limit(option::borrow(optional_aggregator.aggregator)); - aborts_if len(optional_aggregator.aggregator.vec) == 0; - aborts_if len(optional_aggregator.integer.vec) != 0; - ensures !is_parallelizable(optional_aggregator); - ensures option::borrow(optional_aggregator.integer).limit == limit; - ensures option::borrow(optional_aggregator.integer).value == 0; - } - - /// The integer exists and the aggregator does not exist when Switches from non-parallelizable to parallelizable implementation. - /// The AggregatorFactory is under the @aptos_framework. 
- spec switch_to_aggregator_and_zero_out( - optional_aggregator: &mut OptionalAggregator - ): u128 { - let limit = option::borrow(optional_aggregator.integer).limit; - aborts_if len(optional_aggregator.integer.vec) == 0; - aborts_if !exists(@aptos_framework); - aborts_if len(optional_aggregator.aggregator.vec) != 0; - ensures is_parallelizable(optional_aggregator); - ensures aggregator::spec_get_limit(option::borrow(optional_aggregator.aggregator)) == limit; - ensures aggregator::spec_aggregator_get_val(option::borrow(optional_aggregator.aggregator)) == 0; - } - spec destroy(optional_aggregator: OptionalAggregator) { aborts_if is_parallelizable(optional_aggregator) && len(optional_aggregator.integer.vec) != 0; aborts_if !is_parallelizable(optional_aggregator) && len(optional_aggregator.integer.vec) == 0; diff --git a/aptos-move/framework/aptos-framework/sources/aggregator_v2/aggregator_v2.move b/aptos-move/framework/aptos-framework/sources/aggregator_v2/aggregator_v2.move index 7e26548bc0abd..19e77023b2f55 100644 --- a/aptos-move/framework/aptos-framework/sources/aggregator_v2/aggregator_v2.move +++ b/aptos-move/framework/aptos-framework/sources/aggregator_v2/aggregator_v2.move @@ -38,7 +38,7 @@ module aptos_framework::aggregator_v2 { /// The generic type supplied to the aggregator is not supported. const EUNSUPPORTED_AGGREGATOR_TYPE: u64 = 7; - /// Arguments passed to concat exceed max limit of 256 bytes (for prefix and suffix together). + /// Arguments passed to concat exceed max limit of 1024 bytes (for prefix and suffix together). const ECONCAT_STRING_LENGTH_TOO_LARGE: u64 = 8; /// The native aggregator function, that is in the move file, is not yet supported. @@ -67,8 +67,8 @@ module aptos_framework::aggregator_v2 { } /// Returns `max_value` exceeding which aggregator overflows. - public fun max_value(aggregator: &Aggregator): IntElement { - aggregator.max_value + public fun max_value(self: &Aggregator): IntElement { + self.max_value } /// Creates new aggregator, with given 'max_value'. @@ -79,7 +79,7 @@ module aptos_framework::aggregator_v2 { public fun create_aggregator_with_value(start_value: IntElement, max_value: IntElement): Aggregator { let aggregator = create_aggregator(max_value); - add(&mut aggregator, start_value); + aggregator.add(start_value); aggregator } @@ -92,7 +92,7 @@ module aptos_framework::aggregator_v2 { public fun create_unbounded_aggregator_with_value(start_value: IntElement): Aggregator { let aggregator = create_unbounded_aggregator(); - add(&mut aggregator, start_value); + aggregator.add(start_value); aggregator } @@ -100,31 +100,31 @@ module aptos_framework::aggregator_v2 { /// If addition would exceed the max_value, `false` is returned, and aggregator value is left unchanged. /// /// Parallelism info: This operation enables speculative parallelism. - public native fun try_add(aggregator: &mut Aggregator, value: IntElement): bool; + public native fun try_add(self: &mut Aggregator, value: IntElement): bool; /// Adds `value` to aggregator, unconditionally. /// If addition would exceed the max_value, EAGGREGATOR_OVERFLOW exception will be thrown. /// /// Parallelism info: This operation enables speculative parallelism. - public fun add(aggregator: &mut Aggregator, value: IntElement) { - assert!(try_add(aggregator, value), error::out_of_range(EAGGREGATOR_OVERFLOW)); + public fun add(self: &mut Aggregator, value: IntElement) { + assert!(self.try_add(value), error::out_of_range(EAGGREGATOR_OVERFLOW)); } /// Subtracts `value` from aggregator. 
/// If subtraction would result in a negative value, `false` is returned, and aggregator value is left unchanged. /// /// Parallelism info: This operation enables speculative parallelism. - public native fun try_sub(aggregator: &mut Aggregator, value: IntElement): bool; + public native fun try_sub(self: &mut Aggregator, value: IntElement): bool; // Subtracts `value` to aggregator, unconditionally. // If subtraction would result in a negative value, EAGGREGATOR_UNDERFLOW exception will be thrown. /// /// Parallelism info: This operation enables speculative parallelism. - public fun sub(aggregator: &mut Aggregator, value: IntElement) { - assert!(try_sub(aggregator, value), error::out_of_range(EAGGREGATOR_UNDERFLOW)); + public fun sub(self: &mut Aggregator, value: IntElement) { + assert!(self.try_sub(value), error::out_of_range(EAGGREGATOR_UNDERFLOW)); } - native fun is_at_least_impl(aggregator: &Aggregator, min_amount: IntElement): bool; + native fun is_at_least_impl(self: &Aggregator, min_amount: IntElement): bool; /// Returns true if aggregator value is larger than or equal to the given `min_amount`, false otherwise. /// @@ -134,9 +134,9 @@ module aptos_framework::aggregator_v2 { /// - for `is_equal(agg, value)`, you can do `is_at_least(value) && !is_at_least(value + 1)` /// /// Parallelism info: This operation enables speculative parallelism. - public fun is_at_least(aggregator: &Aggregator, min_amount: IntElement): bool { + public fun is_at_least(self: &Aggregator, min_amount: IntElement): bool { assert!(features::aggregator_v2_is_at_least_api_enabled(), EAGGREGATOR_API_V2_NOT_ENABLED); - is_at_least_impl(aggregator, min_amount) + self.is_at_least_impl(min_amount) } // TODO waiting for integer traits @@ -159,13 +159,13 @@ module aptos_framework::aggregator_v2 { /// up to two times slower. /// /// Parallelism info: This operation *prevents* speculative parallelism. - public native fun read(aggregator: &Aggregator): IntElement; + public native fun read(self: &Aggregator): IntElement; /// Returns a wrapper of a current value of an aggregator /// Unlike read(), it is fast and avoids sequential dependencies. /// /// Parallelism info: This operation enables parallelism. - public native fun snapshot(aggregator: &Aggregator): AggregatorSnapshot; + public native fun snapshot(self: &Aggregator): AggregatorSnapshot; /// Creates a snapshot of a given value. /// Useful for when object is sometimes created via snapshot() or string_concat(), and sometimes directly. @@ -177,7 +177,7 @@ module aptos_framework::aggregator_v2 { /// or has other read/write conflicts) /// /// Parallelism info: This operation *prevents* speculative parallelism. - public native fun read_snapshot(snapshot: &AggregatorSnapshot): IntElement; + public native fun read_snapshot(self: &AggregatorSnapshot): IntElement; /// Returns a value stored in this DerivedStringSnapshot. /// Note: This operation is resource-intensive, and reduces parallelism. @@ -185,7 +185,7 @@ module aptos_framework::aggregator_v2 { /// or has other read/write conflicts) /// /// Parallelism info: This operation *prevents* speculative parallelism. - public native fun read_derived_string(snapshot: &DerivedStringSnapshot): String; + public native fun read_derived_string(self: &DerivedStringSnapshot): String; /// Creates a DerivedStringSnapshot of a given value. /// Useful for when object is sometimes created via string_concat(), and sometimes directly. 
@@ -194,7 +194,7 @@ module aptos_framework::aggregator_v2 { /// Concatenates `before`, `snapshot` and `after` into a single string. /// snapshot passed needs to have integer type - currently supported types are u64 and u128. /// Raises EUNSUPPORTED_AGGREGATOR_SNAPSHOT_TYPE if called with another type. - /// If length of prefix and suffix together exceed 256 bytes, ECONCAT_STRING_LENGTH_TOO_LARGE is raised. + /// If length of prefix and suffix together exceeds 1024 bytes, ECONCAT_STRING_LENGTH_TOO_LARGE is raised. /// /// Parallelism info: This operation enables parallelism. public native fun derive_string_concat(before: String, snapshot: &AggregatorSnapshot, after: String): DerivedStringSnapshot; @@ -209,6 +209,199 @@ module aptos_framework::aggregator_v2 { /// DEPRECATED, use derive_string_concat() instead. always raises EAGGREGATOR_FUNCTION_NOT_YET_SUPPORTED. public native fun string_concat(before: String, snapshot: &AggregatorSnapshot, after: String): AggregatorSnapshot; + #[verify_only] + fun verify_aggregator_try_add_sub(): Aggregator { + let agg = create_aggregator(10); + spec { + assert spec_get_max_value(agg) == 10; + assert spec_get_value(agg) == 0; + }; + let x = try_add(&mut agg, 5); + spec { + assert x; + assert is_at_least(agg, 5); + }; + let y = try_sub(&mut agg, 6); + spec { + assert !y; + assert spec_get_value(agg) == 5; + assert spec_get_max_value(agg) == 10; + }; + let y = try_sub(&mut agg, 4); + spec { + assert y; + assert spec_get_value(agg) == 1; + assert spec_get_max_value(agg) == 10; + }; + let x = try_add(&mut agg, 11); + spec { + assert !x; + assert spec_get_value(agg) == 1; + assert spec_get_max_value(agg) == 10; + }; + let x = try_add(&mut agg, 9); + spec { + assert x; + assert spec_get_value(agg) == 10; + assert spec_get_max_value(agg) == 10; + }; + agg + } + + spec verify_aggregator_try_add_sub{ + ensures spec_get_max_value(result) == 10; + ensures spec_get_value(result) == 10; + ensures read(result) == 10; + } + + #[verify_only] + fun verify_aggregator_add_sub(sub_value: u64, add_value: u64) { + let agg = create_aggregator(10); + add(&mut agg, add_value); + spec { + assert spec_get_value(agg) == add_value; + }; + sub(&mut agg, sub_value); + spec { + assert spec_get_value(agg) == add_value - sub_value; + }; + } + + spec verify_aggregator_add_sub(sub_value: u64, add_value: u64) { + pragma aborts_if_is_strict; + aborts_if add_value > 10; + aborts_if sub_value > add_value; + } + + #[verify_only] + fun verify_correct_read() { + let snapshot = create_snapshot(42); + spec { + assert spec_read_snapshot(snapshot) == 42; + }; + let derived = create_derived_string(std::string::utf8(b"42")); + spec { + assert spec_read_derived_string(derived).bytes == b"42"; + }; + } + + #[verify_only] + fun verify_invalid_read(aggregator: &Aggregator): u8 { + read(aggregator) + } + spec verify_invalid_read { + aborts_if true; + } + + #[verify_only] + fun verify_invalid_is_least(aggregator: &Aggregator): bool { + is_at_least(aggregator, 0) + } + spec verify_invalid_is_least { + aborts_if true; + } + + #[verify_only] + fun verify_copy_not_yet_supported() { + let snapshot = create_snapshot(42); + copy_snapshot(&snapshot); + } + + spec verify_copy_not_yet_supported { + aborts_if true; + } + + #[verify_only] + fun verify_string_concat1() { + let snapshot = create_snapshot(42); + let derived = derive_string_concat(std::string::utf8(b"before"), &snapshot, std::string::utf8(b"after")); + spec { + assert spec_read_derived_string(derived).bytes == + concat(b"before", 
concat(spec_get_string_value(snapshot).bytes, b"after")); + }; + } + + #[verify_only] + fun verify_aggregator_generic(): (Aggregator, Aggregator){ + let x = create_unbounded_aggregator(); + let y = create_unbounded_aggregator(); + (x, y) + } + spec verify_aggregator_generic (): (Aggregator, Aggregator) { + use aptos_std::type_info; + aborts_if type_info::type_name().bytes != b"u64" && type_info::type_name().bytes != b"u128"; + aborts_if type_info::type_name().bytes != b"u64" && type_info::type_name().bytes != b"u128"; + } + + #[verify_only] + fun verify_aggregator_generic_add(aggregator: &mut Aggregator, value: IntElement) { + try_add(aggregator, value); + is_at_least_impl(aggregator, value); + // cannot specify aborts_if condition for generic `add` + // because comparison is not supported by IntElement + add(aggregator, value); + } + spec verify_aggregator_generic_add(aggregator: &mut Aggregator, value: IntElement) { + use aptos_std::type_info; + aborts_if type_info::type_name().bytes != b"u64" && type_info::type_name().bytes != b"u128"; + } + + #[verify_only] + fun verify_aggregator_generic_sub(aggregator: &mut Aggregator, value: IntElement) { + try_sub(aggregator, value); + // cannot specify aborts_if condition for generic `sub` + // because comparison is not supported by IntElement + sub(aggregator, value); + } + spec verify_aggregator_generic_sub(aggregator: &mut Aggregator, value: IntElement) { + use aptos_std::type_info; + aborts_if type_info::type_name().bytes != b"u64" && type_info::type_name().bytes != b"u128"; + } + + #[verify_only] + fun verify_aggregator_invalid_type1() { + create_unbounded_aggregator(); + } + spec verify_aggregator_invalid_type1 { + aborts_if true; + } + + #[verify_only] + fun verify_snapshot_invalid_type1() { + use std::option; + create_snapshot(option::some(42)); + } + spec verify_snapshot_invalid_type1 { + aborts_if true; + } + + #[verify_only] + fun verify_snapshot_invalid_type2() { + create_snapshot(vector[42]); + } + + spec verify_snapshot_invalid_type2 { + aborts_if true; + } + + #[verify_only] + fun verify_aggregator_valid_type() { + let _agg_1 = create_unbounded_aggregator(); + spec { + assert spec_get_max_value(_agg_1) == MAX_U64; + }; + let _agg_2 = create_unbounded_aggregator(); + spec { + assert spec_get_max_value(_agg_2) == MAX_U128; + }; + create_aggregator(5); + create_aggregator(5); + } + + spec verify_aggregator_valid_type { + aborts_if false; + } + // ======================================== #[test] diff --git a/aptos-move/framework/aptos-framework/sources/aggregator_v2/aggregator_v2.spec.move b/aptos-move/framework/aptos-framework/sources/aggregator_v2/aggregator_v2.spec.move index ad53181cd793e..267e19b777fe8 100644 --- a/aptos-move/framework/aptos-framework/sources/aggregator_v2/aggregator_v2.spec.move +++ b/aptos-move/framework/aptos-framework/sources/aggregator_v2/aggregator_v2.spec.move @@ -1,51 +1,109 @@ spec aptos_framework::aggregator_v2 { - spec create_aggregator { - // TODO: temporary mockup. - pragma opaque; + + spec Aggregator { + pragma intrinsic; } - spec create_unbounded_aggregator { - // TODO: temporary mockup. 
- pragma opaque; + spec max_value(self: &Aggregator): IntElement { + pragma intrinsic; + } + + spec create_aggregator(max_value: IntElement): Aggregator { + pragma intrinsic; + } + + spec create_unbounded_aggregator(): Aggregator { + pragma intrinsic; + } + + spec try_add(self: &mut Aggregator, value: IntElement): bool { + pragma intrinsic; + } + + spec add(self: &mut Aggregator, value: IntElement) { + pragma intrinsic; + } + + spec try_sub(self: &mut Aggregator, value: IntElement): bool { + pragma intrinsic; + } + + spec sub(self: &mut Aggregator, value: IntElement) { + pragma intrinsic; + } + + spec is_at_least_impl(self: &Aggregator, min_amount: IntElement): bool { + pragma intrinsic; + } + + spec read(self: &Aggregator): IntElement { + pragma intrinsic; } - spec try_add { - // TODO: temporary mockup. + spec snapshot(self: &Aggregator): AggregatorSnapshot { pragma opaque; + include AbortsIfIntElement; + ensures [abstract] result.value == spec_get_value(self); } - spec try_sub { - // TODO: temporary mockup. + spec create_snapshot(value: IntElement): AggregatorSnapshot { pragma opaque; + include AbortsIfIntElement; + ensures [abstract] result.value == value; } - spec is_at_least_impl { - // TODO: temporary mockup. + spec read_snapshot(self: &AggregatorSnapshot): IntElement { pragma opaque; + include AbortsIfIntElement; + ensures [abstract] result == self.value; } - spec read { - // TODO: temporary mockup. + spec read_derived_string(self: &DerivedStringSnapshot): String { pragma opaque; + aborts_if [abstract] false; + ensures [abstract] result == self.value; } - spec snapshot { - // TODO: temporary mockup. + spec create_derived_string(value: String): DerivedStringSnapshot { pragma opaque; + aborts_if [abstract] len(value.bytes) > 1024; + ensures [abstract] result.value == value; } - spec create_snapshot { - // TODO: temporary mockup. + spec derive_string_concat(before: String, snapshot: &AggregatorSnapshot, after: String): DerivedStringSnapshot { pragma opaque; + include AbortsIfIntElement; + ensures [abstract] result.value.bytes == concat(before.bytes, concat(spec_get_string_value(snapshot).bytes, after.bytes)); + aborts_if [abstract] len(before.bytes) + len(after.bytes) > 1024; + } + + spec schema AbortsIfIntElement { + use aptos_std::type_info; + aborts_if [abstract] type_info::type_name().bytes != b"u64" && type_info::type_name().bytes != b"u128"; } + // deprecated spec copy_snapshot { - // TODO: temporary mockup. pragma opaque; + aborts_if [abstract] true; } + // deprecated spec string_concat { - // TODO: temporary mockup. 
pragma opaque; + aborts_if [abstract] true; + } + + // Get aggregator.value + spec native fun spec_get_value(aggregator: Aggregator): IntElement; + // Get aggregator.max_value + spec native fun spec_get_max_value(aggregator: Aggregator): IntElement; + // Uninterpreted spec function that translates the value inside aggregator into corresponding string representation + spec fun spec_get_string_value(aggregator: AggregatorSnapshot): String; + spec fun spec_read_snapshot(snapshot: AggregatorSnapshot): IntElement { + snapshot.value + } + spec fun spec_read_derived_string(snapshot: DerivedStringSnapshot): String { + snapshot.value } } diff --git a/aptos-move/framework/aptos-framework/sources/aptos_account.move b/aptos-move/framework/aptos-framework/sources/aptos_account.move index 8d7c367cb50ff..022a7b20f0789 100644 --- a/aptos-move/framework/aptos-framework/sources/aptos_account.move +++ b/aptos-move/framework/aptos-framework/sources/aptos_account.move @@ -4,7 +4,7 @@ module aptos_framework::aptos_account { use aptos_framework::coin::{Self, Coin}; use aptos_framework::create_signer::create_signer; use aptos_framework::event::{EventHandle, emit_event, emit}; - use aptos_framework::fungible_asset::{Self, Metadata, BurnRef}; + use aptos_framework::fungible_asset::{Self, Metadata, BurnRef, FungibleAsset}; use aptos_framework::primary_fungible_store; use aptos_framework::object; @@ -12,12 +12,12 @@ module aptos_framework::aptos_account { use std::features; use std::signer; use std::vector; + use aptos_framework::object::Object; friend aptos_framework::genesis; friend aptos_framework::resource_account; friend aptos_framework::transaction_fee; friend aptos_framework::transaction_validation; - friend aptos_framework::governed_gas_pool; /// Account does not exist. const EACCOUNT_NOT_FOUND: u64 = 1; @@ -118,9 +118,10 @@ module aptos_framework::aptos_account { if (!account::exists_at(to)) { create_account(to); spec { - assert coin::spec_is_account_registered(to); - assume aptos_std::type_info::type_of() == aptos_std::type_info::type_of() ==> - coin::spec_is_account_registered(to); + // TODO(fa_migration) + // assert coin::spec_is_account_registered(to); + // assume aptos_std::type_info::type_of() == aptos_std::type_info::type_of() ==> + // coin::spec_is_account_registered(to); }; }; if (!coin::is_account_registered(to)) { @@ -133,6 +134,40 @@ module aptos_framework::aptos_account { coin::deposit(to, coins) } + /// Batch version of transfer_fungible_assets. + public entry fun batch_transfer_fungible_assets( + from: &signer, + metadata: Object<Metadata>, + recipients: vector<address>,
+ amounts: vector<u64> + ) { + let recipients_len = vector::length(&recipients); + assert!( + recipients_len == vector::length(&amounts), + error::invalid_argument(EMISMATCHING_RECIPIENTS_AND_AMOUNTS_LENGTH), + ); + + vector::enumerate_ref(&recipients, |i, to| { + let amount = *vector::borrow(&amounts, i); + transfer_fungible_assets(from, metadata, *to, amount); + }); + } + + /// Convenient function to deposit fungible asset into a recipient account that might not exist. + /// This would create the recipient account first to receive the fungible assets. + public entry fun transfer_fungible_assets(from: &signer, metadata: Object<Metadata>, to: address, amount: u64) { + deposit_fungible_assets(to, primary_fungible_store::withdraw(from, metadata, amount)); + } + + /// Convenient function to deposit fungible asset into a recipient account that might not exist. + /// This would create the recipient account first to receive the fungible assets. + public fun deposit_fungible_assets(to: address, fa: FungibleAsset) { + if (!account::exists_at(to)) { + create_account(to); + }; + primary_fungible_store::deposit(to, fa) + } + public fun assert_account_exists(addr: address) { assert!(account::exists_at(addr), error::not_found(EACCOUNT_NOT_FOUND)); } @@ -156,10 +191,11 @@ module aptos_framework::aptos_account { if (std::features::module_event_migration_enabled()) { emit(DirectCoinTransferConfigUpdated { account: addr, new_allow_direct_transfers: allow }); + } else { + emit_event( + &mut direct_transfer_config.update_coin_transfer_events, + DirectCoinTransferConfigUpdatedEvent { new_allow_direct_transfers: allow }); }; - emit_event( - &mut direct_transfer_config.update_coin_transfer_events, - DirectCoinTransferConfigUpdatedEvent { new_allow_direct_transfers: allow }); } else { let direct_transfer_config = DirectTransferConfig { allow_arbitrary_coin_transfers: allow, @@ -167,10 +203,11 @@ module aptos_framework::aptos_account { }; if (std::features::module_event_migration_enabled()) { emit(DirectCoinTransferConfigUpdated { account: addr, new_allow_direct_transfers: allow }); + } else { + emit_event( + &mut direct_transfer_config.update_coin_transfer_events, + DirectCoinTransferConfigUpdatedEvent { new_allow_direct_transfers: allow }); }; - emit_event( - &mut direct_transfer_config.update_coin_transfer_events, - DirectCoinTransferConfigUpdatedEvent { new_allow_direct_transfers: allow }); move_to(account, direct_transfer_config); }; } @@ -200,7 +237,7 @@ module aptos_framework::aptos_account { /// This would create the recipient APT PFS first, which also registers it to receive APT, before transferring. /// TODO: once migration is complete, rename to just "transfer_only" and make it an entry function (for cheapest way /// to transfer APT) - if we want to allow APT PFS without account itself - fun fungible_transfer_only( + public(friend) entry fun fungible_transfer_only( source: &signer, to: address, amount: u64 ) { let sender_store = ensure_primary_fungible_store_exists(signer::address_of(source)); @@ -211,7 +248,8 @@ module aptos_framework::aptos_account { // as APT cannot be frozen or have dispatch, and PFS cannot be transfered // (PFS could potentially be burned. regular transfer would permanently unburn the store.
// Ignoring the check here has the equivalent of unburning, transfers, and then burning again) - fungible_asset::deposit_internal(recipient_store, fungible_asset::withdraw_internal(sender_store, amount)); + fungible_asset::withdraw_permission_check_by_address(source, sender_store, amount); + fungible_asset::unchecked_deposit(recipient_store, fungible_asset::unchecked_withdraw(sender_store, amount)); } /// Is balance from APT Primary FungibleStore at least the given amount @@ -220,8 +258,8 @@ module aptos_framework::aptos_account { fungible_asset::is_address_balance_at_least(store_addr, amount) } - /// Burn from APT Primary FungibleStore - public(friend) fun burn_from_fungible_store( + /// Burn from APT Primary FungibleStore for gas charge + public(friend) fun burn_from_fungible_store_for_gas( ref: &BurnRef, account: address, amount: u64, @@ -229,7 +267,7 @@ module aptos_framework::aptos_account { // Skip burning if amount is zero. This shouldn't error out as it's called as part of transaction fee burning. if (amount != 0) { let store_addr = primary_fungible_store_address(account); - fungible_asset::address_burn_from(ref, store_addr, amount); + fungible_asset::address_burn_from_for_gas(ref, store_addr, amount); }; } @@ -279,12 +317,33 @@ module aptos_framework::aptos_account { coin::destroy_mint_cap(mint_cap); } + #[test(alice = @0xa11ce, core = @0x1)] + public fun test_transfer_permission(alice: &signer, core: &signer) { + use aptos_framework::permissioned_signer; + + let bob = from_bcs::to_address(x"0000000000000000000000000000000000000000000000000000000000000b0b"); + + let (burn_cap, mint_cap) = aptos_framework::aptos_coin::initialize_for_test(core); + create_account(signer::address_of(alice)); + coin::deposit(signer::address_of(alice), coin::mint(10000, &mint_cap)); + + let perm_handle = permissioned_signer::create_permissioned_handle(alice); + let alice_perm_signer = permissioned_signer::signer_from_permissioned_handle(&perm_handle); + primary_fungible_store::grant_apt_permission(alice, &alice_perm_signer, 500); + + transfer(&alice_perm_signer, bob, 500); + + coin::destroy_burn_cap(burn_cap); + coin::destroy_mint_cap(mint_cap); + permissioned_signer::destroy_permissioned_handle(perm_handle); + } + #[test(alice = @0xa11ce, core = @0x1)] public fun test_transfer_to_resource_account(alice: &signer, core: &signer) { let (resource_account, _) = account::create_resource_account(alice, vector[]); let resource_acc_addr = signer::address_of(&resource_account); let (burn_cap, mint_cap) = aptos_framework::aptos_coin::initialize_for_test(core); - assert!(!coin::is_account_registered(resource_acc_addr), 0); + assert!(coin::is_account_registered(resource_acc_addr), 0); create_account(signer::address_of(alice)); coin::deposit(signer::address_of(alice), coin::mint(10000, &mint_cap)); @@ -317,6 +376,7 @@ module aptos_framework::aptos_account { #[test(from = @0x1, to = @0x12)] public fun test_direct_coin_transfers(from: &signer, to: &signer) acquires DirectTransferConfig { + coin::create_coin_conversion_map(from); let (burn_cap, freeze_cap, mint_cap) = coin::initialize( from, utf8(b"FC"), @@ -340,6 +400,7 @@ module aptos_framework::aptos_account { #[test(from = @0x1, recipient_1 = @0x124, recipient_2 = @0x125)] public fun test_batch_transfer_coins( from: &signer, recipient_1: &signer, recipient_2: &signer) acquires DirectTransferConfig { + coin::create_coin_conversion_map(from); let (burn_cap, freeze_cap, mint_cap) = coin::initialize( from, utf8(b"FC"), @@ -381,6 +442,7 @@ module 
aptos_framework::aptos_account { #[test(from = @0x1, to = @0x12)] public fun test_direct_coin_transfers_with_explicit_direct_coin_transfer_config( from: &signer, to: &signer) acquires DirectTransferConfig { + coin::create_coin_conversion_map(from); let (burn_cap, freeze_cap, mint_cap) = coin::initialize( from, utf8(b"FC"), @@ -406,6 +468,9 @@ module aptos_framework::aptos_account { #[expected_failure(abort_code = 0x50003, location = Self)] public fun test_direct_coin_transfers_fail_if_recipient_opted_out( from: &signer, to: &signer) acquires DirectTransferConfig { + let fa_feature = std::features::get_new_accounts_default_to_fa_store_feature(); + std::features::change_feature_flags_for_testing(from, vector[], vector[fa_feature]); + coin::create_coin_conversion_map(from); let (burn_cap, freeze_cap, mint_cap) = coin::initialize( from, utf8(b"FC"), diff --git a/aptos-move/framework/aptos-framework/sources/aptos_account.spec.move b/aptos-move/framework/aptos-framework/sources/aptos_account.spec.move index ff6faa22ac640..22436b757f11e 100644 --- a/aptos-move/framework/aptos-framework/sources/aptos_account.spec.move +++ b/aptos-move/framework/aptos-framework/sources/aptos_account.spec.move @@ -69,12 +69,12 @@ spec aptos_framework::aptos_account { /// [high-level-req-1] pragma aborts_if_is_partial; include CreateAccountAbortsIf; - ensures exists(auth_key); + // ensures exists(auth_key); } spec schema CreateAccountAbortsIf { auth_key: address; aborts_if exists(auth_key); - aborts_if length_judgment(auth_key); + // aborts_if length_judgment(auth_key); aborts_if auth_key == @vm_reserved || auth_key == @aptos_framework || auth_key == @aptos_token; } @@ -90,9 +90,6 @@ spec aptos_framework::aptos_account { pragma verify = false; let account_addr_source = signer::address_of(source); - // The 'from' addr is implictly not equal to 'to' addr - requires account_addr_source != to; - include CreateAccountTransferAbortsIf; include GuidAbortsIf; include WithdrawAbortsIf{from: source}; @@ -105,15 +102,16 @@ spec aptos_framework::aptos_account { } spec assert_account_exists(addr: address) { - aborts_if !account::exists_at(addr); + aborts_if !account::spec_exists_at(addr); } /// Check if the address existed. /// Check if the AptosCoin under the address existed. 
spec assert_account_is_registered_for_apt(addr: address) { pragma aborts_if_is_partial; - aborts_if !account::exists_at(addr); - aborts_if !coin::spec_is_account_registered(addr); + // aborts_if !account::spec_exists_at(addr); + // TODO(fa_migration) + //aborts_if !coin::spec_is_account_registered(addr); } spec set_allow_direct_coin_transfers(account: &signer, allow: bool) { @@ -131,20 +129,20 @@ spec aptos_framework::aptos_account { let coin_store_source = global>(account_addr_source); let balance_source = coin_store_source.coin.value; - requires forall i in 0..len(recipients): - recipients[i] != account_addr_source; - requires exists i in 0..len(recipients): - amounts[i] > 0; + // requires forall i in 0..len(recipients): + // recipients[i] != account_addr_source; + // requires exists i in 0..len(recipients): + // amounts[i] > 0; // create account properties aborts_if len(recipients) != len(amounts); aborts_if exists i in 0..len(recipients): - !account::exists_at(recipients[i]) && length_judgment(recipients[i]); + !account::spec_exists_at(recipients[i]) && length_judgment(recipients[i]); aborts_if exists i in 0..len(recipients): - !account::exists_at(recipients[i]) && (recipients[i] == @vm_reserved || recipients[i] == @aptos_framework || recipients[i] == @aptos_token); + !account::spec_exists_at(recipients[i]) && (recipients[i] == @vm_reserved || recipients[i] == @aptos_framework || recipients[i] == @aptos_token); ensures forall i in 0..len(recipients): - (!account::exists_at(recipients[i]) ==> !length_judgment(recipients[i])) && - (!account::exists_at(recipients[i]) ==> (recipients[i] != @vm_reserved && recipients[i] != @aptos_framework && recipients[i] != @aptos_token)); + (!account::spec_exists_at(recipients[i]) ==> !length_judgment(recipients[i])) && + (!account::spec_exists_at(recipients[i]) ==> (recipients[i] != @vm_reserved && recipients[i] != @aptos_framework && recipients[i] != @aptos_token)); // coin::withdraw properties aborts_if exists i in 0..len(recipients): @@ -160,9 +158,9 @@ spec aptos_framework::aptos_account { // guid properties aborts_if exists i in 0..len(recipients): - account::exists_at(recipients[i]) && !exists>(recipients[i]) && global(recipients[i]).guid_creation_num + 2 >= account::MAX_GUID_CREATION_NUM; + account::spec_exists_at(recipients[i]) && !exists>(recipients[i]) && global(recipients[i]).guid_creation_num + 2 >= account::MAX_GUID_CREATION_NUM; aborts_if exists i in 0..len(recipients): - account::exists_at(recipients[i]) && !exists>(recipients[i]) && global(recipients[i]).guid_creation_num + 2 > MAX_U64; + account::spec_exists_at(recipients[i]) && !exists>(recipients[i]) && global(recipients[i]).guid_creation_num + 2 > MAX_U64; } spec can_receive_direct_coin_transfers(account: address): bool { @@ -176,29 +174,29 @@ spec aptos_framework::aptos_account { spec batch_transfer_coins(from: &signer, recipients: vector
, amounts: vector) { //TODO: Can't verify the loop invariant in enumerate - use aptos_std::type_info; + //use aptos_std::type_info; pragma verify = false; let account_addr_source = signer::address_of(from); let coin_store_source = global>(account_addr_source); let balance_source = coin_store_source.coin.value; - requires forall i in 0..len(recipients): - recipients[i] != account_addr_source; - - requires exists i in 0..len(recipients): - amounts[i] > 0; + // requires forall i in 0..len(recipients): + // recipients[i] != account_addr_source; + // + // requires exists i in 0..len(recipients): + // amounts[i] > 0; /// [high-level-req-7] aborts_if len(recipients) != len(amounts); //create account properties aborts_if exists i in 0..len(recipients): - !account::exists_at(recipients[i]) && length_judgment(recipients[i]); + !account::spec_exists_at(recipients[i]) && length_judgment(recipients[i]); aborts_if exists i in 0..len(recipients): - !account::exists_at(recipients[i]) && (recipients[i] == @vm_reserved || recipients[i] == @aptos_framework || recipients[i] == @aptos_token); + !account::spec_exists_at(recipients[i]) && (recipients[i] == @vm_reserved || recipients[i] == @aptos_framework || recipients[i] == @aptos_token); ensures forall i in 0..len(recipients): - (!account::exists_at(recipients[i]) ==> !length_judgment(recipients[i])) && - (!account::exists_at(recipients[i]) ==> (recipients[i] != @vm_reserved && recipients[i] != @aptos_framework && recipients[i] != @aptos_token)); + (!account::spec_exists_at(recipients[i]) ==> !length_judgment(recipients[i])) && + (!account::spec_exists_at(recipients[i]) ==> (recipients[i] != @vm_reserved && recipients[i] != @aptos_framework && recipients[i] != @aptos_token)); // coin::withdraw properties aborts_if exists i in 0..len(recipients): @@ -214,13 +212,14 @@ spec aptos_framework::aptos_account { // guid properties aborts_if exists i in 0..len(recipients): - account::exists_at(recipients[i]) && !exists>(recipients[i]) && global(recipients[i]).guid_creation_num + 2 >= account::MAX_GUID_CREATION_NUM; + account::spec_exists_at(recipients[i]) && !exists>(recipients[i]) && global(recipients[i]).guid_creation_num + 2 >= account::MAX_GUID_CREATION_NUM; aborts_if exists i in 0..len(recipients): - account::exists_at(recipients[i]) && !exists>(recipients[i]) && global(recipients[i]).guid_creation_num + 2 > MAX_U64; + account::spec_exists_at(recipients[i]) && !exists>(recipients[i]) && global(recipients[i]).guid_creation_num + 2 > MAX_U64; // register_coin properties - aborts_if exists i in 0..len(recipients): - !coin::spec_is_account_registered(recipients[i]) && !type_info::spec_is_struct(); + // TODO(fa_migration) + // aborts_if exists i in 0..len(recipients): + // //!coin::spec_is_account_registered(recipients[i]) && !type_info::spec_is_struct(); } spec deposit_coins(to: address, coins: Coin) { @@ -241,13 +240,23 @@ spec aptos_framework::aptos_account { ensures if_exist_coin ==> post_coin_store_to == coin_store_to + coins.value; } + spec deposit_fungible_assets(to: address, fa: FungibleAsset) { + pragma verify = false; + } + + spec transfer_fungible_assets(from: &signer, metadata: Object, to: address, amount: u64) { + pragma verify = false; + } + + spec batch_transfer_fungible_assets(from: &signer, metadata: Object, recipients: vector
, amounts: vector) { + pragma verify = false; + } + spec transfer_coins(from: &signer, to: address, amount: u64) { // TODO(fa_migration) pragma verify = false; let account_addr_source = signer::address_of(from); - //The 'from' addr is implictly not equal to 'to' addr - requires account_addr_source != to; include CreateAccountTransferAbortsIf; include WithdrawAbortsIf; @@ -275,7 +284,7 @@ spec aptos_framework::aptos_account { pragma verify = false; } - spec burn_from_fungible_store( + spec burn_from_fungible_store_for_gas( ref: &BurnRef, account: address, amount: u64, @@ -286,8 +295,8 @@ spec aptos_framework::aptos_account { spec schema CreateAccountTransferAbortsIf { to: address; - aborts_if !account::exists_at(to) && length_judgment(to); - aborts_if !account::exists_at(to) && (to == @vm_reserved || to == @aptos_framework || to == @aptos_token); + aborts_if !account::spec_exists_at(to) && length_judgment(to); + aborts_if !account::spec_exists_at(to) && (to == @vm_reserved || to == @aptos_framework || to == @aptos_token); } spec schema WithdrawAbortsIf { @@ -304,14 +313,15 @@ spec aptos_framework::aptos_account { spec schema GuidAbortsIf { to: address; let acc = global(to); - aborts_if account::exists_at(to) && !exists>(to) && acc.guid_creation_num + 2 >= account::MAX_GUID_CREATION_NUM; - aborts_if account::exists_at(to) && !exists>(to) && acc.guid_creation_num + 2 > MAX_U64; + aborts_if account::spec_exists_at(to) && !exists>(to) && acc.guid_creation_num + 2 >= account::MAX_GUID_CREATION_NUM; + aborts_if account::spec_exists_at(to) && !exists>(to) && acc.guid_creation_num + 2 > MAX_U64; } spec schema RegistCoinAbortsIf { use aptos_std::type_info; to: address; - aborts_if !coin::spec_is_account_registered(to) && !type_info::spec_is_struct(); + // TODO(fa_migration) + // aborts_if !coin::spec_is_account_registered(to) && !type_info::spec_is_struct(); aborts_if exists(to); aborts_if type_info::type_of() != type_info::type_of(); } diff --git a/aptos-move/framework/aptos-framework/sources/aptos_coin.move b/aptos-move/framework/aptos-framework/sources/aptos_coin.move index 782cf435fc783..cff0f86f85664 100644 --- a/aptos-move/framework/aptos-framework/sources/aptos_coin.move +++ b/aptos-move/framework/aptos-framework/sources/aptos_coin.move @@ -41,8 +41,8 @@ module aptos_framework::aptos_coin { let (burn_cap, freeze_cap, mint_cap) = coin::initialize_with_parallelizable_supply( aptos_framework, - string::utf8(b"Move Coin"), - string::utf8(b"MOVE"), + string::utf8(b"Aptos Coin"), + string::utf8(b"APT"), 8, // decimals true, // monitor_supply ); @@ -61,8 +61,8 @@ module aptos_framework::aptos_coin { /// Only called during genesis to destroy the aptos framework account's mint capability once all initial validators /// and accounts have been initialized during genesis. - public(friend) fun destroy_mint_cap(account: &signer) acquires MintCapStore { - system_addresses::assert_aptos_framework(account); + public(friend) fun destroy_mint_cap(aptos_framework: &signer) acquires MintCapStore { + system_addresses::assert_aptos_framework(aptos_framework); let MintCapStore { mint_cap } = move_from(@aptos_framework); coin::destroy_mint_cap(mint_cap); } @@ -107,21 +107,11 @@ module aptos_framework::aptos_coin { coin::deposit(dst_addr, coins_minted); } - /// Desroy the mint capability from the account. 
- public fun destroy_mint_capability_from(account: &signer, from: address) acquires MintCapStore { - system_addresses::assert_aptos_framework(account); - let MintCapStore { mint_cap } = move_from(from); - coin::destroy_mint_cap(mint_cap); - } - /// Only callable in tests and testnets where the core resources account exists. /// Create delegated token for the address so the account could claim MintCapability later. public entry fun delegate_mint_capability(account: signer, to: address) acquires Delegations { - system_addresses::assert_aptos_framework(&account); - let delegations = &mut borrow_global_mut(@aptos_framework).inner; - if (!exists(signer::address_of(&account))) { - move_to(&account, Delegations { inner: vector[] }); - }; + system_addresses::assert_core_resource(&account); + let delegations = &mut borrow_global_mut(@core_resources).inner; vector::for_each_ref(delegations, |element| { let element: &DelegatedMintCapability = element; assert!(element.to != to, error::invalid_argument(EALREADY_DELEGATED)); @@ -131,21 +121,20 @@ module aptos_framework::aptos_coin { /// Only callable in tests and testnets where the core resources account exists. /// Claim the delegated mint capability and destroy the delegated token. - //@TODO: restore to non-reference `signer` type public entry fun claim_mint_capability(account: &signer) acquires Delegations, MintCapStore { let maybe_index = find_delegation(signer::address_of(account)); assert!(option::is_some(&maybe_index), EDELEGATION_NOT_FOUND); let idx = *option::borrow(&maybe_index); - let delegations = &mut borrow_global_mut(@aptos_framework).inner; + let delegations = &mut borrow_global_mut(@core_resources).inner; let DelegatedMintCapability { to: _ } = vector::swap_remove(delegations, idx); // Make a copy of mint cap and give it to the specified account. - let mint_cap = borrow_global(@aptos_framework).mint_cap; + let mint_cap = borrow_global(@core_resources).mint_cap; move_to(account, MintCapStore { mint_cap }); } fun find_delegation(addr: address): Option acquires Delegations { - let delegations = &borrow_global(@aptos_framework).inner; + let delegations = &borrow_global(@core_resources).inner; let i = 0; let len = vector::length(delegations); let index = option::none(); @@ -196,7 +185,6 @@ module aptos_framework::aptos_coin { #[test_only] public fun initialize_for_test(aptos_framework: &signer): (BurnCapability, MintCapability) { aggregator_factory::initialize_aggregator_factory_for_test(aptos_framework); - init_delegations(aptos_framework); let (burn_cap, mint_cap) = initialize(aptos_framework); coin::create_coin_conversion_map(aptos_framework); coin::create_pairing(aptos_framework); @@ -213,38 +201,4 @@ module aptos_framework::aptos_coin { coin::create_pairing(aptos_framework); (burn_cap, mint_cap) } - - #[test_only] - /// Initializes the Delegations resource under `@aptos_framework`. 
- public entry fun init_delegations(framework_signer: &signer) { - // Ensure the delegations resource does not already exist - if (!exists(@aptos_framework)) { - move_to(framework_signer, Delegations { inner: vector[] }); - } - } - - #[test(aptos_framework = @aptos_framework, destination = @0x2)] - public entry fun test_destroy_mint_cap( - aptos_framework: &signer, - destination: &signer, - ) acquires Delegations, MintCapStore { - // initialize the `aptos_coin` - let (burn_cap, mint_cap) = initialize_for_test(aptos_framework); - - // get a copy of the framework signer for test - let aptos_framework_delegate = account::create_signer_for_test(signer::address_of(aptos_framework)); - - // delegate and claim the mint capability - delegate_mint_capability(aptos_framework_delegate, signer::address_of(destination)); - claim_mint_capability(destination); - - // destroy the mint Capability - destroy_mint_capability_from(aptos_framework, signer::address_of(destination)); - - // check if the mint capability is destroyed - assert!(!exists(signer::address_of(destination)), 2); - - coin::destroy_burn_cap(burn_cap); - coin::destroy_mint_cap(mint_cap); - } } diff --git a/aptos-move/framework/aptos-framework/sources/aptos_coin.spec.move b/aptos-move/framework/aptos-framework/sources/aptos_coin.spec.move index 31b5bb553f2e7..080af15b7f7e6 100644 --- a/aptos-move/framework/aptos-framework/sources/aptos_coin.spec.move +++ b/aptos-move/framework/aptos-framework/sources/aptos_coin.spec.move @@ -31,16 +31,20 @@ spec aptos_framework::aptos_coin { /// spec module { pragma verify = true; - pragma aborts_if_is_strict; + pragma aborts_if_is_partial; } spec initialize(aptos_framework: &signer): (BurnCapability, MintCapability) { use aptos_framework::aggregator_factory; + use aptos_framework::permissioned_signer; + pragma verify = false; + + aborts_if permissioned_signer::spec_is_permissioned_signer(aptos_framework); let addr = signer::address_of(aptos_framework); aborts_if addr != @aptos_framework; - aborts_if !string::spec_internal_check_utf8(b"Move Coin"); - aborts_if !string::spec_internal_check_utf8(b"MOVE"); + aborts_if !string::spec_internal_check_utf8(b"Aptos Coin"); + aborts_if !string::spec_internal_check_utf8(b"APT"); aborts_if exists(addr); aborts_if exists>(addr); aborts_if !exists(addr); @@ -55,7 +59,7 @@ spec aptos_framework::aptos_coin { } spec destroy_mint_cap { - let addr = signer::address_of(account); + let addr = signer::address_of(aptos_framework); aborts_if addr != @aptos_framework; aborts_if !exists(@aptos_framework); } diff --git a/aptos-move/framework/aptos-framework/sources/aptos_governance.move b/aptos-move/framework/aptos-framework/sources/aptos_governance.move index d33004b79b30c..7d1ae729e99f1 100644 --- a/aptos-move/framework/aptos-framework/sources/aptos_governance.move +++ b/aptos-move/framework/aptos-framework/sources/aptos_governance.move @@ -31,6 +31,7 @@ module aptos_framework::aptos_governance { use aptos_framework::system_addresses; use aptos_framework::aptos_coin::{Self, AptosCoin}; use aptos_framework::consensus_config; + use aptos_framework::permissioned_signer; use aptos_framework::randomness_config; use aptos_framework::reconfiguration_with_dkg; use aptos_framework::timestamp; @@ -62,6 +63,10 @@ module aptos_framework::aptos_governance { const EPARTIAL_VOTING_NOT_INITIALIZED: u64 = 13; /// The proposal in the argument is not a partial voting proposal. const ENOT_PARTIAL_VOTING_PROPOSAL: u64 = 14; + /// The proposal has expired. 
+ const EPROPOSAL_EXPIRED: u64 = 15; + /// Current permissioned signer cannot perform governance operations. + const ENO_GOVERNANCE_PERMISSION: u64 = 16; /// This matches the same enum const in voting. We have to duplicate it as Move doesn't have support for enums yet. const PROPOSAL_STATE_SUCCEEDED: u64 = 1; @@ -166,6 +171,21 @@ module aptos_framework::aptos_governance { voting_duration_secs: u64, } + struct GovernancePermission has copy, drop, store {} + + /// Permissions + inline fun check_governance_permission(s: &signer) { + assert!( + permissioned_signer::check_permission_exists(s, GovernancePermission {}), + error::permission_denied(ENO_GOVERNANCE_PERMISSION), + ); + } + + /// Grant permission to perform governance operations on behalf of the master signer. + public fun grant_permission(master: &signer, permissioned_signer: &signer) { + permissioned_signer::authorize_unlimited(master, permissioned_signer, GovernancePermission {}) + } + /// Can be called during genesis or by the governance itself. /// Stores the signer capability for a given address. public fun store_signer_cap( @@ -199,6 +219,7 @@ module aptos_framework::aptos_governance { system_addresses::assert_aptos_framework(aptos_framework); voting::register(aptos_framework); + initialize_partial_voting(aptos_framework); move_to(aptos_framework, GovernanceConfig { voting_duration_secs, min_voting_threshold, @@ -240,16 +261,17 @@ module aptos_framework::aptos_governance { voting_duration_secs }, ) + } else { + let events = borrow_global_mut(@aptos_framework); + event::emit_event( + &mut events.update_config_events, + UpdateConfigEvent { + min_voting_threshold, + required_proposer_stake, + voting_duration_secs + }, + ); }; - let events = borrow_global_mut(@aptos_framework); - event::emit_event( - &mut events.update_config_events, - UpdateConfigEvent { - min_voting_threshold, - required_proposer_stake, - voting_duration_secs - }, - ); } /// Initializes the state for Aptos Governance partial voting. Can only be called through Aptos governance @@ -321,14 +343,27 @@ module aptos_framework::aptos_governance { stake_pool, proposal_id, }; - let used_voting_power = 0u64; - if (features::partial_governance_voting_enabled()) { - let voting_records_v2 = borrow_global(@aptos_framework); - used_voting_power = *smart_table::borrow_with_default(&voting_records_v2.votes, record_key, &0); - }; + let used_voting_power = *VotingRecordsV2[@aptos_framework].votes.borrow_with_default(record_key, &0); get_voting_power(stake_pool) - used_voting_power } + public fun assert_proposal_expiration(stake_pool: address, proposal_id: u64) { + assert_voting_initialization(); + let proposal_expiration = voting::get_proposal_expiration_secs( + @aptos_framework, + proposal_id + ); + // The voter's stake needs to be locked up at least as long as the proposal's expiration. + assert!( + proposal_expiration <= stake::get_lockup_secs(stake_pool), + error::invalid_argument(EINSUFFICIENT_STAKE_LOCKUP), + ); + assert!( + timestamp::now_seconds() <= proposal_expiration, + error::invalid_argument(EPROPOSAL_EXPIRED), + ); + } + /// Create a single-step proposal with the backing `stake_pool`. /// @param execution_hash Required. This is the hash of the resolution script. When the proposal is resolved, /// only the exact script with matching hash can be successfully executed. 
@@ -375,6 +410,7 @@ module aptos_framework::aptos_governance { metadata_hash: vector, is_multi_step_proposal: bool, ): u64 acquires GovernanceConfig, GovernanceEvents { + check_governance_permission(proposer); let proposer_address = signer::address_of(proposer); assert!( stake::get_delegated_voter(stake_pool) == proposer_address, @@ -434,18 +470,19 @@ module aptos_framework::aptos_governance { proposal_metadata, }, ); + } else { + let events = borrow_global_mut(@aptos_framework); + event::emit_event( + &mut events.create_proposal_events, + CreateProposalEvent { + proposal_id, + proposer: proposer_address, + stake_pool, + execution_hash, + proposal_metadata, + }, + ); }; - let events = borrow_global_mut(@aptos_framework); - event::emit_event( - &mut events.create_proposal_events, - CreateProposalEvent { - proposal_id, - proposer: proposer_address, - stake_pool, - execution_hash, - proposal_metadata, - }, - ); proposal_id } @@ -506,18 +543,11 @@ module aptos_framework::aptos_governance { voting_power: u64, should_pass: bool, ) acquires ApprovedExecutionHashes, VotingRecords, VotingRecordsV2, GovernanceEvents { + permissioned_signer::assert_master_signer(voter); let voter_address = signer::address_of(voter); assert!(stake::get_delegated_voter(stake_pool) == voter_address, error::invalid_argument(ENOT_DELEGATED_VOTER)); - // The voter's stake needs to be locked up at least as long as the proposal's expiration. - let proposal_expiration = voting::get_proposal_expiration_secs( - @aptos_framework, - proposal_id - ); - assert!( - stake::get_lockup_secs(stake_pool) >= proposal_expiration, - error::invalid_argument(EINSUFFICIENT_STAKE_LOCKUP), - ); + assert_proposal_expiration(stake_pool, proposal_id); // If a stake pool has already voted on a proposal before partial governance voting is enabled, // `get_remaining_voting_power` returns 0. @@ -539,18 +569,9 @@ module aptos_framework::aptos_governance { stake_pool, proposal_id, }; - if (features::partial_governance_voting_enabled()) { - let voting_records_v2 = borrow_global_mut(@aptos_framework); - let used_voting_power = smart_table::borrow_mut_with_default(&mut voting_records_v2.votes, record_key, 0); - // This calculation should never overflow because the used voting cannot exceed the total voting power of this stake pool. - *used_voting_power = *used_voting_power + voting_power; - } else { - let voting_records = borrow_global_mut(@aptos_framework); - assert!( - !table::contains(&voting_records.votes, record_key), - error::invalid_argument(EALREADY_VOTED)); - table::add(&mut voting_records.votes, record_key, true); - }; + let used_voting_power = VotingRecordsV2[@aptos_framework].votes.borrow_mut_with_default(record_key, 0); + // This calculation should never overflow because the used voting cannot exceed the total voting power of this stake pool. 
+ *used_voting_power += voting_power; if (std::features::module_event_migration_enabled()) { event::emit( @@ -562,18 +583,19 @@ module aptos_framework::aptos_governance { should_pass, }, ); + } else { + let events = &mut GovernanceEvents[@aptos_framework]; + event::emit_event( + &mut events.vote_events, + VoteEvent { + proposal_id, + voter: voter_address, + stake_pool, + num_votes: voting_power, + should_pass, + }, + ); }; - let events = borrow_global_mut(@aptos_framework); - event::emit_event( - &mut events.vote_events, - VoteEvent { - proposal_id, - voter: voter_address, - stake_pool, - num_votes: voting_power, - should_pass, - }, - ); let proposal_state = voting::get_proposal_state(@aptos_framework, proposal_id); if (proposal_state == PROPOSAL_STATE_SUCCEEDED) { @@ -699,6 +721,8 @@ module aptos_framework::aptos_governance { public fun get_signer_testnet_only( core_resources: &signer, signer_address: address): signer acquires GovernanceResponsbility { system_addresses::assert_core_resource(core_resources); + // Core resources account only has mint capability in tests/testnets. + assert!(aptos_coin::has_mint_capability(core_resources), error::unauthenticated(EUNAUTHORIZED)); get_signer(signer_address) } @@ -738,9 +762,7 @@ module aptos_framework::aptos_governance { } fun assert_voting_initialization() { - if (features::partial_governance_voting_enabled()) { - assert!(exists(@aptos_framework), error::invalid_state(EPARTIAL_VOTING_NOT_INITIALIZED)); - }; + assert!(exists(@aptos_framework), error::invalid_state(EPARTIAL_VOTING_NOT_INITIALIZED)); } #[test_only] @@ -748,8 +770,7 @@ module aptos_framework::aptos_governance { proposer: &signer, multi_step: bool, ) acquires GovernanceConfig, GovernanceEvents { - let execution_hash = vector::empty(); - vector::push_back(&mut execution_hash, 1); + let execution_hash = vector[1]; if (multi_step) { create_proposal_v2( proposer, @@ -806,10 +827,9 @@ module aptos_framework::aptos_governance { multi_step: bool, use_generic_resolve_function: bool, ) acquires ApprovedExecutionHashes, GovernanceConfig, GovernanceResponsbility, VotingRecords, VotingRecordsV2, GovernanceEvents { - setup_voting(&aptos_framework, &proposer, &yes_voter, &no_voter); + setup_partial_voting(&aptos_framework, &proposer, &yes_voter, &no_voter); - let execution_hash = vector::empty(); - vector::push_back(&mut execution_hash, 1); + let execution_hash = vector[1]; create_proposal_for_test(&proposer, multi_step); @@ -893,7 +913,7 @@ module aptos_framework::aptos_governance { no_voter: signer, multi_step: bool, ) acquires ApprovedExecutionHashes, GovernanceConfig, GovernanceResponsbility, VotingRecords, VotingRecordsV2, GovernanceEvents { - setup_voting(&aptos_framework, &proposer, &yes_voter, &no_voter); + setup_partial_voting(&aptos_framework, &proposer, &yes_voter, &no_voter); create_proposal_for_test(&proposer, multi_step); vote(&yes_voter, signer::address_of(&yes_voter), 0, true); @@ -957,7 +977,7 @@ module aptos_framework::aptos_governance { } #[test(aptos_framework = @aptos_framework, proposer = @0x123, voter_1 = @0x234, voter_2 = @345)] - #[expected_failure(abort_code = 0x10004, location = aptos_framework::voting)] + #[expected_failure(abort_code = 65541, location = aptos_framework::aptos_governance)] public entry fun test_cannot_double_vote( aptos_framework: signer, proposer: signer, @@ -969,7 +989,7 @@ module aptos_framework::aptos_governance { create_proposal( &proposer, signer::address_of(&proposer), - b"", + b"0", b"", b"", ); @@ -980,7 +1000,54 @@ module 
aptos_framework::aptos_governance { } #[test(aptos_framework = @aptos_framework, proposer = @0x123, voter_1 = @0x234, voter_2 = @345)] - #[expected_failure(abort_code = 0x10004, location = aptos_framework::voting)] + #[expected_failure(abort_code = 65551, location = aptos_framework::aptos_governance)] + public entry fun test_cannot_vote_for_expired_proposal( + aptos_framework: signer, + proposer: signer, + voter_1: signer, + voter_2: signer, + ) acquires ApprovedExecutionHashes, GovernanceConfig, GovernanceResponsbility, VotingRecords, VotingRecordsV2, GovernanceEvents { + setup_partial_voting_with_initialized_stake(&aptos_framework, &proposer, &voter_1, &voter_2); + + create_proposal( + &proposer, + signer::address_of(&proposer), + b"0", + b"", + b"", + ); + + timestamp::fast_forward_seconds(2000); + stake::end_epoch(); + + // Should abort because the proposal has expired. + vote(&voter_1, signer::address_of(&voter_1), 0, true); + } + + #[test(aptos_framework = @aptos_framework, proposer = @0x123, voter_1 = @0x234, voter_2 = @0x345)] + #[expected_failure(abort_code = 65539, location = aptos_framework::aptos_governance)] + public entry fun test_cannot_vote_due_to_insufficient_stake_lockup( + aptos_framework: signer, + proposer: signer, + voter_1: signer, + voter_2: signer, + ) acquires ApprovedExecutionHashes, GovernanceConfig, GovernanceResponsbility, VotingRecords, VotingRecordsV2, GovernanceEvents { + setup_partial_voting_with_initialized_stake(&aptos_framework, &proposer, &voter_1, &voter_2); + + create_proposal( + &proposer, + signer::address_of(&proposer), + b"0", + b"", + b"", + ); + + // Should abort due to insufficient stake lockup. + vote(&voter_1, signer::address_of(&voter_1), 0, true); + } + + #[test(aptos_framework = @aptos_framework, proposer = @0x123, voter_1 = @0x234, voter_2 = @345)] + #[expected_failure(abort_code = 65541, location = aptos_framework::aptos_governance)] public entry fun test_cannot_double_vote_with_different_voter_addresses( aptos_framework: signer, proposer: signer, @@ -992,7 +1059,7 @@ module aptos_framework::aptos_governance { create_proposal( &proposer, signer::address_of(&proposer), - b"", + b"0", b"", b"", ); @@ -1011,8 +1078,7 @@ module aptos_framework::aptos_governance { voter_2: signer, ) acquires ApprovedExecutionHashes, GovernanceConfig, GovernanceResponsbility, VotingRecords, VotingRecordsV2, GovernanceEvents { setup_partial_voting(&aptos_framework, &proposer, &voter_1, &voter_2); - let execution_hash = vector::empty(); - vector::push_back(&mut execution_hash, 1); + let execution_hash = vector[1]; let proposer_addr = signer::address_of(&proposer); let voter_1_addr = signer::address_of(&voter_1); let voter_2_addr = signer::address_of(&voter_2); @@ -1039,8 +1105,7 @@ module aptos_framework::aptos_governance { voter_2: signer, ) acquires ApprovedExecutionHashes, GovernanceConfig, GovernanceResponsbility, VotingRecords, VotingRecordsV2, GovernanceEvents { setup_partial_voting(&aptos_framework, &proposer, &voter_1, &voter_2); - let execution_hash = vector::empty(); - vector::push_back(&mut execution_hash, 1); + let execution_hash = vector[1]; let proposer_addr = signer::address_of(&proposer); let voter_1_addr = signer::address_of(&voter_1); let voter_2_addr = signer::address_of(&voter_2); @@ -1066,8 +1131,7 @@ module aptos_framework::aptos_governance { ) acquires ApprovedExecutionHashes, GovernanceConfig, GovernanceResponsbility, VotingRecords, VotingRecordsV2, GovernanceEvents { features::change_feature_flags_for_testing(&aptos_framework, 
vector[features::get_coin_to_fungible_asset_migration_feature()], vector[]); setup_partial_voting(&aptos_framework, &proposer, &voter_1, &voter_2); - let execution_hash = vector::empty(); - vector::push_back(&mut execution_hash, 1); + let execution_hash = vector[1]; let voter_1_addr = signer::address_of(&voter_1); let voter_2_addr = signer::address_of(&voter_2); stake::set_delegated_voter(&voter_2, voter_1_addr); @@ -1085,8 +1149,7 @@ module aptos_framework::aptos_governance { ) acquires ApprovedExecutionHashes, GovernanceConfig, GovernanceResponsbility, VotingRecords, VotingRecordsV2, GovernanceEvents { features::change_feature_flags_for_testing(&aptos_framework, vector[features::get_coin_to_fungible_asset_migration_feature()], vector[]); setup_partial_voting(&aptos_framework, &proposer, &voter_1, &voter_2); - let execution_hash = vector::empty(); - vector::push_back(&mut execution_hash, 1); + let execution_hash = vector[1]; let voter_1_addr = signer::address_of(&voter_1); let voter_2_addr = signer::address_of(&voter_2); stake::set_delegated_voter(&voter_2, voter_1_addr); @@ -1103,8 +1166,7 @@ module aptos_framework::aptos_governance { voter_2: signer, ) acquires ApprovedExecutionHashes, GovernanceConfig, GovernanceResponsbility, VotingRecords, VotingRecordsV2, GovernanceEvents { setup_partial_voting(&aptos_framework, &proposer, &voter_1, &voter_2); - let execution_hash = vector::empty(); - vector::push_back(&mut execution_hash, 1); + let execution_hash = vector[1]; let proposer_addr = signer::address_of(&proposer); let voter_1_addr = signer::address_of(&voter_1); let voter_2_addr = signer::address_of(&voter_2); @@ -1122,43 +1184,6 @@ module aptos_framework::aptos_governance { test_resolving_proposal_generic(aptos_framework, true, execution_hash); } - #[test(aptos_framework = @aptos_framework, proposer = @0x123, voter_1 = @0x234, voter_2 = @345)] - public entry fun test_stake_pool_can_vote_before_and_after_partial_governance_voting_enabled( - aptos_framework: signer, - proposer: signer, - voter_1: signer, - voter_2: signer, - ) acquires ApprovedExecutionHashes, GovernanceConfig, GovernanceResponsbility, VotingRecords, VotingRecordsV2, GovernanceEvents { - setup_voting(&aptos_framework, &proposer, &voter_1, &voter_2); - let execution_hash = vector::empty(); - vector::push_back(&mut execution_hash, 1); - let proposer_addr = signer::address_of(&proposer); - let voter_1_addr = signer::address_of(&voter_1); - let voter_2_addr = signer::address_of(&voter_2); - - create_proposal_for_test(&proposer, true); - vote(&voter_1, voter_1_addr, 0, true); - assert!(get_remaining_voting_power(proposer_addr, 0) == 100, 0); - assert!(get_remaining_voting_power(voter_1_addr, 0) == 0, 1); - assert!(get_remaining_voting_power(voter_2_addr, 0) == 10, 2); - - initialize_partial_voting(&aptos_framework); - features::change_feature_flags_for_testing(&aptos_framework, vector[features::get_partial_governance_voting()], vector[]); - - coin::register(&voter_1); - coin::register(&voter_2); - stake::add_stake(&voter_1, 20); - stake::add_stake(&voter_2, 5); - - // voter1 has already voted before partial governance voting is enalbed. So it cannot vote even after adding stake. - // voter2's voting poewr increase after adding stake. 
- assert!(get_remaining_voting_power(proposer_addr, 0) == 100, 0); - assert!(get_remaining_voting_power(voter_1_addr, 0) == 0, 1); - assert!(get_remaining_voting_power(voter_2_addr, 0) == 15, 2); - - test_resolving_proposal_generic(aptos_framework, true, execution_hash); - } - #[test(aptos_framework = @aptos_framework, proposer = @0x123, voter_1 = @0x234, voter_2 = @345)] public entry fun test_no_remaining_voting_power_about_proposal_expiration_time( aptos_framework: signer, @@ -1166,9 +1191,7 @@ module aptos_framework::aptos_governance { voter_1: signer, voter_2: signer, ) acquires GovernanceConfig, GovernanceResponsbility, VotingRecords, VotingRecordsV2, GovernanceEvents { - setup_voting_with_initialized_stake(&aptos_framework, &proposer, &voter_1, &voter_2); - let execution_hash = vector::empty(); - vector::push_back(&mut execution_hash, 1); + setup_partial_voting_with_initialized_stake(&aptos_framework, &proposer, &voter_1, &voter_2); let proposer_addr = signer::address_of(&proposer); let voter_1_addr = signer::address_of(&voter_1); let voter_2_addr = signer::address_of(&voter_2); @@ -1294,6 +1317,16 @@ module aptos_framework::aptos_governance { stake::end_epoch(); } + #[test_only] + public fun setup_partial_voting_with_initialized_stake( + aptos_framework: &signer, + proposer: &signer, + yes_voter: &signer, + no_voter: &signer, + ) acquires GovernanceResponsbility { + setup_voting_with_initialized_stake(aptos_framework, proposer, yes_voter, no_voter); + } + #[test_only] public fun setup_partial_voting( aptos_framework: &signer, @@ -1301,8 +1334,6 @@ module aptos_framework::aptos_governance { voter_1: &signer, voter_2: &signer, ) acquires GovernanceResponsbility { - initialize_partial_voting(aptos_framework); - features::change_feature_flags_for_testing(aptos_framework, vector[features::get_partial_governance_voting()], vector[]); setup_voting(aptos_framework, proposer, voter_1, voter_2); } @@ -1335,7 +1366,7 @@ module aptos_framework::aptos_governance { yes_voter: signer, no_voter: signer, ) acquires GovernanceResponsbility, GovernanceConfig, ApprovedExecutionHashes, VotingRecords, VotingRecordsV2, GovernanceEvents { - setup_voting(&aptos_framework, &proposer, &yes_voter, &no_voter); + setup_partial_voting(&aptos_framework, &proposer, &yes_voter, &no_voter); create_proposal_for_test(&proposer, true); vote(&yes_voter, signer::address_of(&yes_voter), 0, true); diff --git a/aptos-move/framework/aptos-framework/sources/aptos_governance.spec.move b/aptos-move/framework/aptos-framework/sources/aptos_governance.spec.move index 4c0189023816c..bae16334b1e13 100644 --- a/aptos-move/framework/aptos-framework/sources/aptos_governance.spec.move +++ b/aptos-move/framework/aptos-framework/sources/aptos_governance.spec.move @@ -28,8 +28,15 @@ spec aptos_framework::aptos_governance { /// /// spec module { - pragma verify = true; - pragma aborts_if_is_strict; + pragma verify = false; + pragma aborts_if_is_partial; + } + + spec schema AbortsIfPermissionedSigner { + use aptos_framework::permissioned_signer; + s: signer; + let perm = GovernancePermission {}; + aborts_if !permissioned_signer::spec_check_permission_exists(s, perm); } spec store_signer_cap( @@ -59,14 +66,15 @@ spec aptos_framework::aptos_governance { voting_duration_secs: u64, ) { use aptos_std::type_info::Self; + pragma aborts_if_is_partial; let addr = signer::address_of(aptos_framework); let register_account = global(addr); aborts_if exists>(addr); - aborts_if !exists(addr); - aborts_if register_account.guid_creation_num + 7 > MAX_U64; - 
aborts_if register_account.guid_creation_num + 7 >= account::MAX_GUID_CREATION_NUM; + // aborts_if !exists(addr); + // aborts_if register_account.guid_creation_num + 7 > MAX_U64; + // aborts_if register_account.guid_creation_num + 7 >= account::MAX_GUID_CREATION_NUM; aborts_if !type_info::spec_is_struct(); include InitializeAbortIf; @@ -76,6 +84,7 @@ spec aptos_framework::aptos_governance { ensures exists(addr); ensures exists(addr); ensures exists(addr); + ensures exists(addr); } /// Signer address must be @aptos_framework. @@ -103,7 +112,8 @@ spec aptos_framework::aptos_governance { aborts_if exists(addr); aborts_if exists(addr); aborts_if exists(addr); - aborts_if !exists(addr); + // aborts_if !exists(addr); + aborts_if exists(addr); } /// Signer address must be @aptos_framework. @@ -120,7 +130,9 @@ spec aptos_framework::aptos_governance { let post new_governance_config = global(@aptos_framework); aborts_if addr != @aptos_framework; aborts_if !exists(@aptos_framework); - aborts_if !exists(@aptos_framework); + aborts_if !features::spec_is_enabled(features::MODULE_EVENT_MIGRATION) && !exists( + @aptos_framework + ); modifies global(addr); ensures new_governance_config.voting_duration_secs == voting_duration_secs; @@ -138,7 +150,6 @@ spec aptos_framework::aptos_governance { use aptos_framework::chain_status; use aptos_framework::coin::CoinInfo; use aptos_framework::aptos_coin::AptosCoin; - use aptos_framework::transaction_fee; pragma verify = false; // TODO: set because of timeout (property proved). let addr = signer::address_of(aptos_framework); aborts_if addr != @aptos_framework; @@ -146,9 +157,7 @@ spec aptos_framework::aptos_governance { framework: aptos_framework }; include stake::GetReconfigStartTimeRequirement; - include transaction_fee::RequiresCollectedFeesPerValueLeqBlockAptosSupply; requires chain_status::is_operating(); - requires exists(@aptos_framework); requires exists>(@aptos_framework); requires exists(@aptos_framework); include staking_config::StakingRewardsConfigRequirement; @@ -214,6 +223,7 @@ spec aptos_framework::aptos_governance { pragma verify_duration_estimate = 60; requires chain_status::is_operating(); include CreateProposalAbortsIf; + // include AbortsIfPermissionedSigner { s: proposer }; } /// `stake_pool` must exist StakePool. 
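The new `expected_failure` codes in the governance test hunks above (65541, 65539, 65551, replacing 0x10004 in `aptos_framework::voting`) follow the `std::error` convention: the upper 16 bits carry the error category and the lower 16 bits carry the module-defined reason. A minimal sketch of that arithmetic, assuming only the standard `std::error` helpers; the governance constants behind reasons 5, 3 and 15 are not visible in this extract:

module 0x42::abort_code_sketch {
    use std::error;

    // error::invalid_argument(reason) == (0x1 << 16) + reason, i.e. category
    // INVALID_ARGUMENT (0x1) in the upper 16 bits, the reason in the lower 16 bits.
    public fun demo(): bool {
        error::invalid_argument(5) == 65541      // 0x10005
            && error::invalid_argument(3) == 65539   // 0x10003
            && error::invalid_argument(15) == 65551  // 0x1000F
    }
}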
@@ -375,8 +385,7 @@ spec aptos_framework::aptos_governance { } else { 0 }; - aborts_if !remain_zero_1_cond && !entirely_voted && features::spec_partial_governance_voting_enabled() && - used_voting_power > 0 && spec_voting_power < used_voting_power; + aborts_if !remain_zero_1_cond && !entirely_voted && used_voting_power > 0 && spec_voting_power < used_voting_power; let remaining_power = spec_get_remaining_voting_power(stake_pool, proposal_id); let real_voting_power = min(voting_power, remaining_power); @@ -418,8 +427,7 @@ spec aptos_framework::aptos_governance { ensures simple_map::spec_contains_key(post_proposal.metadata, key); ensures simple_map::spec_get(post_proposal.metadata, key) == std::bcs::to_bytes(timestamp::now_seconds()); - aborts_if features::spec_partial_governance_voting_enabled() && used_voting_power + real_voting_power > MAX_U64; - aborts_if !features::spec_partial_governance_voting_enabled() && table::spec_contains(voting_records.votes, record_key); + aborts_if used_voting_power + real_voting_power > MAX_U64; aborts_if !exists(@aptos_framework); @@ -484,7 +492,7 @@ spec aptos_framework::aptos_governance { ensures proposal_state_successed ==> simple_map::spec_contains_key(post_approved_hashes.hashes, proposal_id) && simple_map::spec_get(post_approved_hashes.hashes, proposal_id) == execution_hash; - aborts_if features::spec_partial_governance_voting_enabled() && !exists(@aptos_framework); + aborts_if !exists(@aptos_framework); } spec add_approved_script_hash(proposal_id: u64) { @@ -578,7 +586,6 @@ spec aptos_framework::aptos_governance { use aptos_framework::chain_status; use aptos_framework::coin::CoinInfo; use aptos_framework::aptos_coin::AptosCoin; - use aptos_framework::transaction_fee; pragma verify = false; // TODO: set because of timeout (property proved). 
aborts_if !system_addresses::is_aptos_framework_address(signer::address_of(aptos_framework)); include reconfiguration_with_dkg::FinishRequirement { @@ -586,9 +593,7 @@ spec aptos_framework::aptos_governance { }; include stake::GetReconfigStartTimeRequirement; - include transaction_fee::RequiresCollectedFeesPerValueLeqBlockAptosSupply; requires chain_status::is_operating(); - requires exists(@aptos_framework); requires exists>(@aptos_framework); requires exists(@aptos_framework); include staking_config::StakingRewardsConfigRequirement; @@ -635,7 +640,7 @@ spec aptos_framework::aptos_governance { } spec get_remaining_voting_power(stake_pool: address, proposal_id: u64): u64 { - aborts_if features::spec_partial_governance_voting_enabled() && !exists(@aptos_framework); + aborts_if !exists(@aptos_framework); include voting::AbortsIfNotContainProposalID { voting_forum_address: @aptos_framework }; @@ -662,8 +667,7 @@ spec aptos_framework::aptos_governance { } else { 0 }; - aborts_if !remain_zero_1_cond && !entirely_voted && features::spec_partial_governance_voting_enabled() && - used_voting_power > 0 && voting_power < used_voting_power; + aborts_if !remain_zero_1_cond && !entirely_voted && used_voting_power > 0 && voting_power < used_voting_power; ensures result == spec_get_remaining_voting_power(stake_pool, proposal_id); } @@ -689,8 +693,6 @@ spec aptos_framework::aptos_governance { 0 } else if (entirely_voted) { 0 - } else if (!features::spec_partial_governance_voting_enabled()) { - voting_power } else { voting_power - used_voting_power } @@ -834,6 +836,16 @@ spec aptos_framework::aptos_governance { include VotingInitializationAbortIfs; } + spec assert_proposal_expiration(stake_pool: address, proposal_id: u64) { + include VotingInitializationAbortIfs; + include voting::AbortsIfNotContainProposalID{voting_forum_address: @aptos_framework}; + let proposal_expiration = voting::spec_get_proposal_expiration_secs(@aptos_framework, proposal_id); + aborts_if !stake::stake_pool_exists(stake_pool); + aborts_if proposal_expiration > stake::spec_get_lockup_secs(stake_pool); + aborts_if !exists(@aptos_framework); + aborts_if timestamp::now_seconds() > proposal_expiration; + } + spec force_end_epoch(aptos_framework: &signer) { use aptos_framework::reconfiguration_with_dkg; use std::signer; @@ -845,7 +857,7 @@ spec aptos_framework::aptos_governance { } spec schema VotingInitializationAbortIfs { - aborts_if features::spec_partial_governance_voting_enabled() && !exists(@aptos_framework); + aborts_if !exists(@aptos_framework); } spec force_end_epoch_test_only { diff --git a/aptos-move/framework/aptos-framework/sources/atomic_bridge.move b/aptos-move/framework/aptos-framework/sources/atomic_bridge.move deleted file mode 100644 index b8731f0217735..0000000000000 --- a/aptos-move/framework/aptos-framework/sources/atomic_bridge.move +++ /dev/null @@ -1,621 +0,0 @@ -module aptos_framework::atomic_bridge_initiator { - - const EATOMIC_BRIDGE_DISABLED: u64 = 0x3073d; - - use aptos_framework::event::EventHandle; - - #[event] - struct BridgeTransferInitiatedEvent has store, drop { - bridge_transfer_id: vector, - initiator: address, - recipient: vector, - amount: u64, - hash_lock: vector, - time_lock: u64, - } - - #[event] - struct BridgeTransferCompletedEvent has store, drop { - bridge_transfer_id: vector, - pre_image: vector, - } - - #[event] - struct BridgeTransferRefundedEvent has store, drop { - bridge_transfer_id: vector, - } - - /// This struct will store the event handles for bridge events. 
- struct BridgeInitiatorEvents has key, store { - bridge_transfer_initiated_events: EventHandle, - bridge_transfer_completed_events: EventHandle, - bridge_transfer_refunded_events: EventHandle, - } - - /// Initializes the module and stores the `EventHandle`s in the resource. - public fun initialize(_aptos_framework: &signer) { - - } - - /// Initiate a bridge transfer of ETH from Movement to the base layer - /// Anyone can initiate a bridge transfer from the source chain - /// The amount is burnt from the initiator - public entry fun initiate_bridge_transfer( - _initiator: &signer, - _recipient: vector, - _hash_lock: vector, - _amount: u64 - ) { - abort EATOMIC_BRIDGE_DISABLED - } - - /// Bridge operator can complete the transfer - public entry fun complete_bridge_transfer ( - _caller: &signer, - _bridge_transfer_id: vector, - _pre_image: vector, - ) { - abort EATOMIC_BRIDGE_DISABLED - } - - /// Anyone can refund the transfer on the source chain once time lock has passed - public entry fun refund_bridge_transfer ( - _caller: &signer, - _bridge_transfer_id: vector, - ) { - abort EATOMIC_BRIDGE_DISABLED - } - -} - -module aptos_framework::atomic_bridge_store { - use std::vector; - use aptos_std::smart_table::SmartTable; - use aptos_framework::ethereum::EthereumAddress; - use aptos_framework::timestamp; - - friend aptos_framework::atomic_bridge_counterparty; - friend aptos_framework::atomic_bridge_initiator; - - #[test_only] - use std::hash::sha3_256; - - /// Error codes - const EINVALID_PRE_IMAGE : u64 = 0x1; - const ENOT_PENDING_TRANSACTION : u64 = 0x2; - const EEXPIRED : u64 = 0x3; - const ENOT_EXPIRED : u64 = 0x4; - const EINVALID_HASH_LOCK : u64 = 0x5; - const EINVALID_TIME_LOCK : u64 = 0x6; - const EZERO_AMOUNT : u64 = 0x7; - const EINVALID_BRIDGE_TRANSFER_ID : u64 = 0x8; - const EATOMIC_BRIDGE_NOT_ENABLED : u64 = 0x9; - const EATOMIC_BRIDGE_DISABLED: u64 = 0x3073d; - - /// Transaction states - const PENDING_TRANSACTION: u8 = 0x1; - const COMPLETED_TRANSACTION: u8 = 0x2; - const CANCELLED_TRANSACTION: u8 = 0x3; - - /// Minimum time lock of 1 second - const MIN_TIME_LOCK : u64 = 1; - const MAX_U64 : u64 = 0xFFFFFFFFFFFFFFFF; - - struct AddressPair has store, copy { - initiator: Initiator, - recipient: Recipient, - } - - /// A smart table wrapper - struct SmartTableWrapper has key, store { - inner: SmartTable, - } - - /// Details on the transfer - struct BridgeTransferDetails has store, copy { - addresses: AddressPair, - amount: u64, - hash_lock: vector, - time_lock: u64, - state: u8, - } - - struct Nonce has key { - inner: u64 - } - - /// Initializes the initiators and counterparties tables and nonce. - /// - /// @param aptos_framework The signer for Aptos framework. - public fun initialize(_aptos_framework: &signer) { - abort EATOMIC_BRIDGE_DISABLED - } - - /// Returns the current time in seconds. - /// - /// @return Current timestamp in seconds. - fun now() : u64 { - timestamp::now_seconds() - } - - /// Creates a time lock by adding a duration to the current time. - /// - /// @param lock The duration to lock. - /// @return The calculated time lock. - /// @abort If lock is not above MIN_TIME_LOCK - public(friend) fun create_time_lock(_time_lock: u64) : u64 { - abort EATOMIC_BRIDGE_DISABLED - } - - /// Creates bridge transfer details with validation. - /// - /// @param initiator The initiating party of the transfer. - /// @param recipient The receiving party of the transfer. - /// @param amount The amount to be transferred. - /// @param hash_lock The hash lock for the transfer. 
- /// @param time_lock The time lock for the transfer. - /// @return A `BridgeTransferDetails` object. - /// @abort If the amount is zero or locks are invalid. - public(friend) fun create_details(_initiator: Initiator, _recipient: Recipient, _amount: u64, _hash_lock: vector, _time_lock: u64) - : BridgeTransferDetails { - abort EATOMIC_BRIDGE_DISABLED - } - - /// Record details of a transfer - /// - /// @param bridge_transfer_id Bridge transfer ID. - /// @param details The bridge transfer details - public(friend) fun add(_bridge_transfer_id: vector, _details: BridgeTransferDetails) { - abort EATOMIC_BRIDGE_DISABLED - } - - /// Asserts that the time lock is valid. - /// - /// @param time_lock - /// @abort If the time lock is invalid. - fun assert_min_time_lock(_time_lock: u64) { - assert!(_time_lock >= MIN_TIME_LOCK, EINVALID_TIME_LOCK); - } - - /// Asserts that the details state is pending. - /// - /// @param details The bridge transfer details to check. - /// @abort If the state is not pending. - fun assert_pending(_details: &BridgeTransferDetails) { - assert!(_details.state == PENDING_TRANSACTION, ENOT_PENDING_TRANSACTION) - } - - /// Asserts that the hash lock is valid. - /// - /// @param hash_lock The hash lock to validate. - /// @abort If the hash lock is invalid. - fun assert_valid_hash_lock(_hash_lock: &vector) { - assert!(vector::length(_hash_lock) == 32, EINVALID_HASH_LOCK); - } - - /// Asserts that the bridge transfer ID is valid. - /// - /// @param bridge_transfer_id The bridge transfer ID to validate. - /// @abort If the ID is invalid. - public(friend) fun assert_valid_bridge_transfer_id(_bridge_transfer_id: &vector) { - assert!(vector::length(_bridge_transfer_id) == 32, EINVALID_BRIDGE_TRANSFER_ID); - } - - /// Creates a hash lock from a pre-image. - /// - /// @param pre_image The pre-image to hash. - /// @return The generated hash lock. - public(friend) fun create_hashlock(_pre_image: vector) : vector { - abort EATOMIC_BRIDGE_DISABLED - } - - /// Asserts that the hash lock matches the expected value. - /// - /// @param details The bridge transfer details. - /// @param hash_lock The hash lock to compare. - /// @abort If the hash lock is incorrect. - fun assert_correct_hash_lock(_details: &BridgeTransferDetails, _hash_lock: vector) { - assert!(&_hash_lock == &_details.hash_lock, EINVALID_PRE_IMAGE); - } - - /// Asserts that the time lock has expired. - /// - /// @param details The bridge transfer details. - /// @abort If the time lock has not expired. - fun assert_timed_out_lock(_details: &BridgeTransferDetails) { - assert!(now() > _details.time_lock, ENOT_EXPIRED); - } - - /// Asserts we are still within the timelock. - /// - /// @param details The bridge transfer details. - /// @abort If the time lock has expired. - fun assert_within_timelock(_details: &BridgeTransferDetails) { - assert!(!(now() > _details.time_lock), EEXPIRED); - } - - /// Completes the bridge transfer. - /// - /// @param details The bridge transfer details to complete. - fun complete(_details: &mut BridgeTransferDetails) { - _details.state = COMPLETED_TRANSACTION; - } - - /// Cancels the bridge transfer. - /// - /// @param details The bridge transfer details to cancel. - fun cancel(_details: &mut BridgeTransferDetails) { - _details.state = CANCELLED_TRANSACTION; - } - - /// Validates and completes a bridge transfer by confirming the hash lock and state. - /// - /// @param hash_lock The hash lock used to validate the transfer. 
- /// @param details The mutable reference to the bridge transfer details to be completed. - /// @return A tuple containing the recipient and the amount of the transfer. - /// @abort If the hash lock is invalid, the transfer is not pending, or the hash lock does not match. - fun complete_details(_hash_lock: vector, _details: &mut BridgeTransferDetails) : (Recipient, u64) { - assert_valid_hash_lock(&_hash_lock); - assert_pending(_details); - assert_correct_hash_lock(_details, _hash_lock); - assert_within_timelock(_details); - - complete(_details); - - (_details.addresses.recipient, _details.amount) - } - - /// Completes a bridge transfer by validating the hash lock and updating the transfer state. - /// - /// @param bridge_transfer_id The ID of the bridge transfer to complete. - /// @param hash_lock The hash lock used to validate the transfer. - /// @return A tuple containing the recipient of the transfer and the amount transferred. - /// @abort If the bridge transfer details are not found or if the completion checks in `complete_details` fail. - public(friend) fun complete_transfer(_bridge_transfer_id: vector, _hash_lock: vector) : (Recipient, u64) { - abort EATOMIC_BRIDGE_DISABLED - } - - /// Cancels a pending bridge transfer if the time lock has expired. - /// - /// @param details A mutable reference to the bridge transfer details to be canceled. - /// @return A tuple containing the initiator of the transfer and the amount to be refunded. - /// @abort If the transfer is not in a pending state or the time lock has not expired. - fun cancel_details(_details: &mut BridgeTransferDetails) : (Initiator, u64) { - assert_pending(_details); - assert_timed_out_lock(_details); - - cancel(_details); - - (_details.addresses.initiator, _details.amount) - } - - /// Cancels a bridge transfer if it is pending and the time lock has expired. - /// - /// @param bridge_transfer_id The ID of the bridge transfer to cancel. - /// @return A tuple containing the initiator of the transfer and the amount to be refunded. - /// @abort If the bridge transfer details are not found or if the cancellation conditions in `cancel_details` fail. - public(friend) fun cancel_transfer(_bridge_transfer_id: vector) : (Initiator, u64) { - abort EATOMIC_BRIDGE_DISABLED - } - - /// Generates a unique bridge transfer ID based on transfer details and nonce. - /// - /// @param details The bridge transfer details. - /// @return The generated bridge transfer ID. - public(friend) fun bridge_transfer_id(_details: &BridgeTransferDetails) : vector { - abort EATOMIC_BRIDGE_DISABLED - } - - #[view] - /// Gets initiator bridge transfer details given a bridge transfer ID - /// - /// @param bridge_transfer_id A 32-byte vector of unsigned 8-bit integers. - /// @return A `BridgeTransferDetails` struct. - /// @abort If there is no transfer in the atomic bridge store. - public fun get_bridge_transfer_details_initiator( - _bridge_transfer_id: vector - ): BridgeTransferDetails { - abort EATOMIC_BRIDGE_DISABLED - } - - #[view] - /// Gets counterparty bridge transfer details given a bridge transfer ID - /// - /// @param bridge_transfer_id A 32-byte vector of unsigned 8-bit integers. - /// @return A `BridgeTransferDetails` struct. - /// @abort If there is no transfer in the atomic bridge store. 
- public fun get_bridge_transfer_details_counterparty( - _bridge_transfer_id: vector - ): BridgeTransferDetails { - abort EATOMIC_BRIDGE_DISABLED - } - - fun get_bridge_transfer_details(_bridge_transfer_id: vector - ): BridgeTransferDetails { - abort EATOMIC_BRIDGE_DISABLED - } - - #[test_only] - public fun valid_bridge_transfer_id() : vector { - sha3_256(b"atomic bridge") - } - - #[test_only] - public fun plain_secret() : vector { - b"too secret!" - } - - #[test_only] - public fun valid_hash_lock() : vector { - abort EATOMIC_BRIDGE_DISABLED - } - -} - -module aptos_framework::atomic_bridge_configuration { - - friend aptos_framework::atomic_bridge_counterparty; - friend aptos_framework::atomic_bridge_initiator; - - /// Error code for invalid bridge operator - const EINVALID_BRIDGE_OPERATOR: u64 = 0x1; - /// Error code for atomic bridge disabled - const EATOMIC_BRIDGE_DISABLED: u64 = 0x2; - - /// Counterparty time lock duration is 24 hours in seconds - const COUNTERPARTY_TIME_LOCK_DUARTION: u64 = 24 * 60 * 60; - /// Initiator time lock duration is 48 hours in seconds - const INITIATOR_TIME_LOCK_DUARTION: u64 = 48 * 60 * 60; - - struct BridgeConfig has key { - bridge_operator: address, - initiator_time_lock: u64, - counterparty_time_lock: u64, - } - - #[event] - /// Event emitted when the bridge operator is updated. - struct BridgeConfigOperatorUpdated has store, drop { - old_operator: address, - new_operator: address, - } - - #[event] - /// Event emitted when the initiator time lock has been updated. - struct InitiatorTimeLockUpdated has store, drop { - time_lock: u64, - } - - #[event] - /// Event emitted when the initiator time lock has been updated. - struct CounterpartyTimeLockUpdated has store, drop { - time_lock: u64, - } - - /// Initializes the bridge configuration with Aptos framework as the bridge operator. - /// - /// @param aptos_framework The signer representing the Aptos framework. - public fun initialize(_aptos_framework: &signer) { - abort EATOMIC_BRIDGE_DISABLED - } - - /// Updates the bridge operator, requiring governance validation. - /// - /// @param aptos_framework The signer representing the Aptos framework. - /// @param new_operator The new address to be set as the bridge operator. - /// @abort If the current operator is the same as the new operator. - public fun update_bridge_operator(_aptos_framework: &signer, _new_operator: address - ) { - abort EATOMIC_BRIDGE_DISABLED - } - - public fun set_initiator_time_lock_duration(_aptos_framework: &signer, _time_lock: u64 - ) { - abort EATOMIC_BRIDGE_DISABLED - } - - public fun set_counterparty_time_lock_duration(_aptos_framework: &signer, _time_lock: u64 - ) { - abort EATOMIC_BRIDGE_DISABLED - } - - #[view] - public fun initiator_timelock_duration() : u64 { - abort EATOMIC_BRIDGE_DISABLED - } - - #[view] - public fun counterparty_timelock_duration() : u64 { - abort EATOMIC_BRIDGE_DISABLED - } - - #[view] - /// Retrieves the address of the current bridge operator. - /// - /// @return The address of the current bridge operator. - public fun bridge_operator(): address { - abort EATOMIC_BRIDGE_DISABLED - } - - /// Asserts that the caller is the current bridge operator. - /// - /// @param caller The signer whose authority is being checked. - /// @abort If the caller is not the current bridge operator. 
- public(friend) fun assert_is_caller_operator(_caller: &signer - ) { - abort EATOMIC_BRIDGE_DISABLED - } - -} - -module aptos_framework::atomic_bridge { - use aptos_framework::aptos_coin::AptosCoin; - use aptos_framework::coin::{BurnCapability, MintCapability}; - use aptos_framework::fungible_asset::{BurnRef, MintRef}; - - friend aptos_framework::atomic_bridge_counterparty; - friend aptos_framework::atomic_bridge_initiator; - friend aptos_framework::genesis; - - const EATOMIC_BRIDGE_NOT_ENABLED : u64 = 0x1; - const EATOMIC_BRIDGE_DISABLED: u64 = 0x3073d; - - struct AptosCoinBurnCapability has key { - burn_cap: BurnCapability, - } - - struct AptosCoinMintCapability has key { - mint_cap: MintCapability, - } - - struct AptosFABurnCapabilities has key { - burn_ref: BurnRef, - } - - struct AptosFAMintCapabilities has key { - burn_ref: MintRef, - } - - /// Initializes the atomic bridge by setting up necessary configurations. - /// - /// @param aptos_framework The signer representing the Aptos framework. - public fun initialize(_aptos_framework: &signer) { - abort EATOMIC_BRIDGE_DISABLED - } - - #[test_only] - /// Initializes the atomic bridge for testing purposes, including setting up accounts and timestamps. - /// - /// @param aptos_framework The signer representing the Aptos framework. - public fun initialize_for_test(_aptos_framework: &signer) { - abort EATOMIC_BRIDGE_DISABLED - } - - /// Stores the burn capability for AptosCoin, converting to a fungible asset reference if the feature is enabled. - /// - /// @param aptos_framework The signer representing the Aptos framework. - /// @param burn_cap The burn capability for AptosCoin. - public fun store_aptos_coin_burn_cap(_aptos_framework: &signer, _burn_cap: BurnCapability) { - abort EATOMIC_BRIDGE_DISABLED - } - - /// Stores the mint capability for AptosCoin. - /// - /// @param aptos_framework The signer representing the Aptos framework. - /// @param mint_cap The mint capability for AptosCoin. - public fun store_aptos_coin_mint_cap(_aptos_framework: &signer, _mint_cap: MintCapability) { - abort EATOMIC_BRIDGE_DISABLED - } - - /// Mints a specified amount of AptosCoin to a recipient's address. - /// - /// @param recipient The address of the recipient to mint coins to. - /// @param amount The amount of AptosCoin to mint. - /// @abort If the mint capability is not available. - public(friend) fun mint(_recipient: address, _amount: u64) { - abort EATOMIC_BRIDGE_DISABLED - } - - /// Burns a specified amount of AptosCoin from an address. - /// - /// @param from The address from which to burn AptosCoin. - /// @param amount The amount of AptosCoin to burn. - /// @abort If the burn capability is not available. 
- public(friend) fun burn(_from: address, _amount: u64) { - abort EATOMIC_BRIDGE_DISABLED - } -} - -module aptos_framework::atomic_bridge_counterparty { - use aptos_framework::account; - use aptos_framework::event::EventHandle; - - const EATOMIC_BRIDGE_DISABLED: u64 = 0x3073d; - - #[event] - /// An event triggered upon locking assets for a bridge transfer - struct BridgeTransferLockedEvent has store, drop { - bridge_transfer_id: vector, - initiator: vector, - recipient: address, - amount: u64, - hash_lock: vector, - time_lock: u64, - } - - #[event] - /// An event triggered upon completing a bridge transfer - struct BridgeTransferCompletedEvent has store, drop { - bridge_transfer_id: vector, - pre_image: vector, - } - - #[event] - /// An event triggered upon cancelling a bridge transfer - struct BridgeTransferCancelledEvent has store, drop { - bridge_transfer_id: vector, - } - - /// This struct will store the event handles for bridge events. - struct BridgeCounterpartyEvents has key, store { - bridge_transfer_locked_events: EventHandle, - bridge_transfer_completed_events: EventHandle, - bridge_transfer_cancelled_events: EventHandle, - } - - /// Initializes the module and stores the `EventHandle`s in the resource. - public fun initialize(aptos_framework: &signer) { - move_to(aptos_framework, BridgeCounterpartyEvents { - bridge_transfer_locked_events: account::new_event_handle(aptos_framework), - bridge_transfer_completed_events: account::new_event_handle(aptos_framework), - bridge_transfer_cancelled_events: account::new_event_handle(aptos_framework), - }); - } - - /// Locks assets for a bridge transfer by the initiator. - /// - /// @param caller The signer representing the bridge operator. - /// @param initiator The initiator's Ethereum address as a vector of bytes. - /// @param bridge_transfer_id The unique identifier for the bridge transfer. - /// @param hash_lock The hash lock for securing the transfer. - /// @param time_lock The time lock duration for the transfer. - /// @param recipient The address of the recipient on the Aptos blockchain. - /// @param amount The amount of assets to be locked. - /// @abort If the caller is not the bridge operator. - public entry fun lock_bridge_transfer_assets ( - _caller: &signer, - _initiator: vector, - _bridge_transfer_id: vector, - _hash_lock: vector, - _recipient: address, - _amount: u64 - ) { - abort EATOMIC_BRIDGE_DISABLED - } - - /// Completes a bridge transfer by revealing the pre-image. - /// - /// @param bridge_transfer_id The unique identifier for the bridge transfer. - /// @param pre_image The pre-image that matches the hash lock to complete the transfer. - /// @abort If the caller is not the bridge operator or the hash lock validation fails. - public entry fun complete_bridge_transfer ( - _bridge_transfer_id: vector, - _pre_image: vector, - ) { - abort EATOMIC_BRIDGE_DISABLED - } - - /// Aborts a bridge transfer if the time lock has expired. - /// - /// @param caller The signer representing the bridge operator. - /// @param bridge_transfer_id The unique identifier for the bridge transfer. - /// @abort If the caller is not the bridge operator or if the time lock has not expired. 
- public entry fun abort_bridge_transfer ( - _caller: &signer, - _bridge_transfer_id: vector - ) { - abort EATOMIC_BRIDGE_DISABLED - } - -} - diff --git a/aptos-move/framework/aptos-framework/sources/atomic_bridge.spec.move b/aptos-move/framework/aptos-framework/sources/atomic_bridge.spec.move deleted file mode 100644 index a7a5a393ed101..0000000000000 --- a/aptos-move/framework/aptos-framework/sources/atomic_bridge.spec.move +++ /dev/null @@ -1,3 +0,0 @@ -spec aptos_framework::atomic_bridge_store { - -} diff --git a/aptos-move/framework/aptos-framework/sources/block.move b/aptos-move/framework/aptos-framework/sources/block.move index 5899481316be7..3dc18b06480eb 100644 --- a/aptos-move/framework/aptos-framework/sources/block.move +++ b/aptos-move/framework/aptos-framework/sources/block.move @@ -1,7 +1,6 @@ /// This module defines a struct storing the metadata of the block and new block events. module aptos_framework::block { use std::error; - use std::features; use std::vector; use std::option; use aptos_std::table_with_length::{Self, TableWithLength}; @@ -16,7 +15,6 @@ module aptos_framework::block { use aptos_framework::state_storage; use aptos_framework::system_addresses; use aptos_framework::timestamp; - use aptos_framework::transaction_fee; friend aptos_framework::genesis; @@ -138,11 +136,12 @@ module aptos_framework::block { event::emit( UpdateEpochInterval { old_epoch_interval, new_epoch_interval }, ); + } else { + event::emit_event( + &mut block_resource.update_epoch_interval_events, + UpdateEpochIntervalEvent { old_epoch_interval, new_epoch_interval }, + ); }; - event::emit_event( - &mut block_resource.update_epoch_interval_events, - UpdateEpochIntervalEvent { old_epoch_interval, new_epoch_interval }, - ); } #[view] @@ -179,7 +178,6 @@ module aptos_framework::block { let block_metadata_ref = borrow_global_mut(@aptos_framework); block_metadata_ref.height = event::counter(&block_metadata_ref.new_block_events); - // Emit both event v1 and v2 for compatibility. Eventually only module events will be kept. let new_block_event = NewBlockEvent { hash, epoch, @@ -190,26 +188,7 @@ module aptos_framework::block { failed_proposer_indices, time_microseconds: timestamp, }; - let new_block_event_v2 = NewBlock { - hash, - epoch, - round, - height: block_metadata_ref.height, - previous_block_votes_bitvec, - proposer, - failed_proposer_indices, - time_microseconds: timestamp, - }; - emit_new_block_event(vm, &mut block_metadata_ref.new_block_events, new_block_event, new_block_event_v2); - - if (features::collect_and_distribute_gas_fees()) { - // Assign the fees collected from the previous block to the previous block proposer. - // If for any reason the fees cannot be assigned, this function burns the collected coins. - transaction_fee::process_collected_fees(); - // Set the proposer of this block as the receiver of the fees, so that the fees for this - // block are assigned to the right account. - transaction_fee::register_proposer_for_fee_collection(proposer); - }; + emit_new_block_event(vm, &mut block_metadata_ref.new_block_events, new_block_event); // Performance scores have to be updated before the epoch transition as the transaction that triggers the // transition is the last block in the previous epoch. 
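Both the governance hunk earlier and the block.move hunk above replace dual emission with an either/or: emit the `#[event]` module event when the module-event-migration feature is enabled, otherwise emit through the legacy `EventHandle`. A minimal sketch of that gating pattern, with hypothetical `MyEvent`, `MyEventV1` and `Handles` types standing in for the concrete events in this diff:

module 0x42::event_migration_sketch {
    use std::features;
    use aptos_framework::event::{Self, EventHandle};

    #[event]
    struct MyEvent has drop, store { value: u64 }

    struct MyEventV1 has drop, store { value: u64 }

    struct Handles has key {
        legacy_events: EventHandle<MyEventV1>,
    }

    public fun emit_value(value: u64) acquires Handles {
        if (features::module_event_migration_enabled()) {
            // New path: module event, no stored handle required.
            event::emit(MyEvent { value });
        } else {
            // Legacy path: push through the EventHandle kept in a resource.
            let handles = borrow_global_mut<Handles>(@0x42);
            event::emit_event(&mut handles.legacy_events, MyEventV1 { value });
        };
    }
}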
@@ -278,7 +257,6 @@ module aptos_framework::block { vm: &signer, event_handle: &mut EventHandle, new_block_event: NewBlockEvent, - new_block_event_v2: NewBlock ) acquires CommitHistory { if (exists(@aptos_framework)) { let commit_history_ref = borrow_global_mut(@aptos_framework); @@ -297,9 +275,6 @@ module aptos_framework::block { event::counter(event_handle) == new_block_event.height, error::invalid_argument(ENUM_NEW_BLOCK_EVENTS_DOES_NOT_MATCH_BLOCK_HEIGHT), ); - if (std::features::module_event_migration_enabled()) { - event::emit(new_block_event_v2); - }; event::emit_event(event_handle, new_block_event); } @@ -321,16 +296,6 @@ module aptos_framework::block { failed_proposer_indices: vector::empty(), time_microseconds: 0, }, - NewBlock { - hash: genesis_id, - epoch: 0, - round: 0, - height: 0, - previous_block_votes_bitvec: vector::empty(), - proposer: @vm_reserved, - failed_proposer_indices: vector::empty(), - time_microseconds: 0, - } ); } @@ -354,16 +319,6 @@ module aptos_framework::block { failed_proposer_indices: vector::empty(), time_microseconds: timestamp::now_microseconds(), }, - NewBlock { - hash: fake_block_hash, - epoch: reconfiguration::current_epoch(), - round: MAX_U64, - height: block_metadata_ref.height, - previous_block_votes_bitvec: vector::empty(), - proposer: @vm_reserved, - failed_proposer_indices: vector::empty(), - time_microseconds: timestamp::now_microseconds(), - } ); } diff --git a/aptos-move/framework/aptos-framework/sources/block.spec.move b/aptos-move/framework/aptos-framework/sources/block.spec.move index 2a8d982bd8ed7..488b800fdd5dc 100644 --- a/aptos-move/framework/aptos-framework/sources/block.spec.move +++ b/aptos-move/framework/aptos-framework/sources/block.spec.move @@ -42,6 +42,7 @@ spec aptos_framework::block { /// spec module { use aptos_framework::chain_status; + pragma verify = false; // After genesis, `BlockResource` exist. invariant [suspendable] chain_status::is_operating() ==> exists(@aptos_framework); // After genesis, `CommitHistory` exist. @@ -127,7 +128,6 @@ spec aptos_framework::block { use aptos_framework::chain_status; use aptos_framework::coin::CoinInfo; use aptos_framework::aptos_coin::AptosCoin; - use aptos_framework::transaction_fee; use aptos_framework::staking_config; vm: signer; @@ -145,9 +145,7 @@ spec aptos_framework::block { requires proposer == @vm_reserved || stake::spec_is_current_epoch_validator(proposer); requires (proposer == @vm_reserved) ==> (timestamp::spec_now_microseconds() == timestamp); requires (proposer != @vm_reserved) ==> (timestamp::spec_now_microseconds() < timestamp); - requires exists(@aptos_framework); requires exists>(@aptos_framework); - include transaction_fee::RequiresCollectedFeesPerValueLeqBlockAptosSupply; include staking_config::StakingRewardsConfigRequirement; } diff --git a/aptos-move/framework/aptos-framework/sources/chain_status.move b/aptos-move/framework/aptos-framework/sources/chain_status.move index a2ddd72de0996..32c2ea069001d 100644 --- a/aptos-move/framework/aptos-framework/sources/chain_status.move +++ b/aptos-move/framework/aptos-framework/sources/chain_status.move @@ -43,6 +43,6 @@ module aptos_framework::chain_status { /// Helper function to assert genesis state. 
public fun assert_genesis() { - assert!(is_genesis(), error::invalid_state(ENOT_GENESIS)); + assert!(is_genesis(), error::invalid_state(ENOT_OPERATING)); } } diff --git a/aptos-move/framework/aptos-framework/sources/code.move b/aptos-move/framework/aptos-framework/sources/code.move index 181c12b94625f..56a0b13d0056c 100644 --- a/aptos-move/framework/aptos-framework/sources/code.move +++ b/aptos-move/framework/aptos-framework/sources/code.move @@ -13,6 +13,9 @@ module aptos_framework::code { use std::string; use aptos_framework::event; use aptos_framework::object::{Self, Object}; + use aptos_framework::permissioned_signer; + + friend aptos_framework::object_code_deployment; // ---------------------------------------------------------------------- // Code Publishing @@ -24,7 +27,7 @@ module aptos_framework::code { } /// Metadata for a package. All byte blobs are represented as base64-of-gzipped-bytes - struct PackageMetadata has store, drop { + struct PackageMetadata has copy, drop, store { /// Name of this package. name: String, /// The upgrade policy of this package. @@ -52,7 +55,7 @@ module aptos_framework::code { } /// Metadata about a module in a package. - struct ModuleMetadata has store, drop { + struct ModuleMetadata has copy, drop, store { /// Name of the module. name: String, /// Source text, gzipped String. Empty if not provided. @@ -105,6 +108,24 @@ module aptos_framework::code { /// `code_object` does not exist. const ECODE_OBJECT_DOES_NOT_EXIST: u64 = 0xA; + /// Current permissioned signer cannot publish codes. + const ENO_CODE_PERMISSION: u64 = 0xB; + + struct CodePublishingPermission has copy, drop, store {} + + /// Permissions + public(friend) fun check_code_publishing_permission(s: &signer) { + assert!( + permissioned_signer::check_permission_exists(s, CodePublishingPermission {}), + error::permission_denied(ENO_CODE_PERMISSION), + ); + } + + /// Grant permission to publish code on behalf of the master signer. + public fun grant_permission(master: &signer, permissioned_signer: &signer) { + permissioned_signer::authorize_unlimited(master, permissioned_signer, CodePublishingPermission {}) + } + /// Whether unconditional code upgrade with no compatibility check is allowed. This /// publication mode should only be used for modules which aren't shared with user others. /// The developer is responsible for not breaking memory layout of any resources he already @@ -145,6 +166,7 @@ module aptos_framework::code { /// Publishes a package at the given signer's address. The caller must provide package metadata describing the /// package. public fun publish_package(owner: &signer, pack: PackageMetadata, code: vector>) acquires PackageRegistry { + check_code_publishing_permission(owner); // Disallow incompatible upgrade mode. Governance can decide later if this should be reconsidered. 
assert!( pack.upgrade_policy.policy > upgrade_policy_arbitrary().policy, @@ -206,6 +228,7 @@ module aptos_framework::code { } public fun freeze_code_object(publisher: &signer, code_object: Object) acquires PackageRegistry { + check_code_publishing_permission(publisher); let code_object_addr = object::object_address(&code_object); assert!(exists(code_object_addr), error::not_found(ECODE_OBJECT_DOES_NOT_EXIST)); assert!( @@ -214,10 +237,18 @@ module aptos_framework::code { ); let registry = borrow_global_mut(code_object_addr); - vector::for_each_mut(&mut registry.packages, |pack| { + vector::for_each_mut(&mut registry.packages, |pack| { let package: &mut PackageMetadata = pack; package.upgrade_policy = upgrade_policy_immutable(); }); + + // We unfortunately have to make a copy of each package to avoid borrow checker issues as check_dependencies + // needs to borrow PackageRegistry from the dependency packages. + // This would increase the amount of gas used, but this is a rare operation and it's rare to have many packages + // in a single code object. + vector::for_each(registry.packages, |pack| { + check_dependencies(code_object_addr, &pack); + }); } /// Same as `publish_package` but as an entry function which can be called as a transaction. Because diff --git a/aptos-move/framework/aptos-framework/sources/code.spec.move b/aptos-move/framework/aptos-framework/sources/code.spec.move index f968e0dbbddc3..88c63ccb927d0 100644 --- a/aptos-move/framework/aptos-framework/sources/code.spec.move +++ b/aptos-move/framework/aptos-framework/sources/code.spec.move @@ -59,7 +59,7 @@ spec aptos_framework::code { /// spec module { pragma verify = true; - pragma aborts_if_is_strict; + pragma aborts_if_is_partial; } spec request_publish { @@ -72,6 +72,13 @@ spec aptos_framework::code { pragma opaque; } + spec schema AbortsIfPermissionedSigner { + use aptos_framework::permissioned_signer; + s: signer; + let perm = CodePublishingPermission {}; + aborts_if !permissioned_signer::spec_check_permission_exists(s, perm); + } + spec initialize(aptos_framework: &signer, package_owner: &signer, metadata: PackageMetadata) { let aptos_addr = signer::address_of(aptos_framework); let owner_addr = signer::address_of(package_owner); @@ -86,6 +93,7 @@ spec aptos_framework::code { let addr = signer::address_of(owner); modifies global(addr); aborts_if pack.upgrade_policy.policy <= upgrade_policy_arbitrary().policy; + // include AbortsIfPermissionedSigner { s: owner }; } spec publish_package_txn { @@ -125,6 +133,7 @@ spec aptos_framework::code { aborts_if !exists(code_object_addr); aborts_if !exists(code_object_addr); aborts_if !object::is_owner(code_object, signer::address_of(publisher)); + // include AbortsIfPermissionedSigner { s: publisher }; modifies global(code_object_addr); } diff --git a/aptos-move/framework/aptos-framework/sources/coin.move b/aptos-move/framework/aptos-framework/sources/coin.move index d4e9d12074789..1fac33802c192 100644 --- a/aptos-move/framework/aptos-framework/sources/coin.move +++ b/aptos-move/framework/aptos-framework/sources/coin.move @@ -9,22 +9,22 @@ module aptos_framework::coin { use aptos_framework::account; use aptos_framework::aggregator_factory; - use aptos_framework::aggregator::{Self, Aggregator}; + use aptos_framework::aggregator::Aggregator; use aptos_framework::event::{Self, EventHandle}; use aptos_framework::guid; use aptos_framework::optional_aggregator::{Self, OptionalAggregator}; + use aptos_framework::permissioned_signer; use aptos_framework::system_addresses; use 
aptos_framework::fungible_asset::{Self, FungibleAsset, Metadata, MintRef, TransferRef, BurnRef}; use aptos_framework::object::{Self, Object, object_address}; use aptos_framework::primary_fungible_store; - use aptos_std::type_info::{Self, TypeInfo, type_name}; + use aptos_std::type_info::{Self, TypeInfo}; use aptos_framework::create_signer; friend aptos_framework::aptos_coin; friend aptos_framework::genesis; friend aptos_framework::transaction_fee; - friend aptos_framework::governed_gas_pool; // // Errors. @@ -108,12 +108,16 @@ module aptos_framework::coin { /// APT pairing is not eanbled yet. const EAPT_PAIRING_IS_NOT_ENABLED: u64 = 28; + /// The decimals of the coin is too large. + const ECOIN_DECIMALS_TOO_LARGE: u64 = 29; + // // Constants // const MAX_COIN_NAME_LENGTH: u64 = 32; - const MAX_COIN_SYMBOL_LENGTH: u64 = 10; + const MAX_COIN_SYMBOL_LENGTH: u64 = 32; + const MAX_DECIMALS: u8 = 32; /// Core data structures @@ -123,9 +127,8 @@ module aptos_framework::coin { value: u64, } - /// Represents a coin with aggregator as its value. This allows to update - /// the coin in every transaction avoiding read-modify-write conflicts. Only - /// used for gas fees distribution by Aptos Framework (0x1). + #[deprecated] + /// DEPRECATED struct AggregatableCoin has store { /// Amount of aggregatable coin this address has. value: Aggregator, @@ -143,9 +146,7 @@ module aptos_framework::coin { withdraw_events: EventHandle, } - /// Maximum possible coin supply. - const MAX_U128: u128 = 340282366920938463463374607431768211455; - + #[deprecated] /// Configuration that controls the behavior of total coin supply. If the field /// is set, coin creators are allowed to upgrade to parallelizable implementations. struct SupplyConfig has key { @@ -210,14 +211,26 @@ module aptos_framework::coin { } + #[deprecated] #[event] /// Module event emitted when the event handles related to coin store is deleted. + /// + /// Deprecated: replaced with CoinStoreDeletion struct CoinEventHandleDeletion has drop, store { event_handle_creation_address: address, deleted_deposit_event_handle_creation_number: u64, deleted_withdraw_event_handle_creation_number: u64, } + #[event] + /// Module event emitted when the event handles related to coin store is deleted. + struct CoinStoreDeletion has drop, store { + coin_type: String, + event_handle_creation_address: address, + deleted_deposit_event_handle_creation_number: u64, + deleted_withdraw_event_handle_creation_number: u64, + } + #[event] /// Module event emitted when a new pair of coin and fungible asset is created. struct PairCreation has drop, store { @@ -225,10 +238,6 @@ module aptos_framework::coin { fungible_asset_metadata_address: address, } - #[resource_group_member(group = aptos_framework::object::ObjectGroup)] - /// The flag the existence of which indicates the primary fungible store is created by the migration from CoinStore. - struct MigrationFlag has key {} - /// Capability required to mint coins. 
struct MintCapability has copy, store {} @@ -329,7 +338,7 @@ module aptos_framework::coin { }; primary_fungible_store::create_primary_store_enabled_fungible_asset( &metadata_object_cref, - option::map(coin_supply(), |_| MAX_U128), + option::none(), name(), symbol(), decimals(), @@ -534,7 +543,7 @@ module aptos_framework::coin { let metadata = assert_paired_metadata_exists(); let metadata_addr = object_address(&metadata); assert!(exists(metadata_addr), error::internal(EPAIRED_FUNGIBLE_ASSET_REFS_NOT_FOUND)); - let burn_ref_opt = &mut borrow_global_mut(metadata_addr).burn_ref_opt; + let burn_ref_opt = &borrow_global(metadata_addr).burn_ref_opt; assert!(option::is_some(burn_ref_opt), error::not_found(EBURN_REF_NOT_FOUND)); option::borrow(burn_ref_opt) } @@ -543,105 +552,10 @@ module aptos_framework::coin { // Total supply config // - /// Publishes supply configuration. Initially, upgrading is not allowed. - public(friend) fun initialize_supply_config(aptos_framework: &signer) { - system_addresses::assert_aptos_framework(aptos_framework); - move_to(aptos_framework, SupplyConfig { allow_upgrades: false }); - } - /// This should be called by on-chain governance to update the config and allow /// or disallow upgradability of total supply. - public fun allow_supply_upgrades(aptos_framework: &signer, allowed: bool) acquires SupplyConfig { - system_addresses::assert_aptos_framework(aptos_framework); - let allow_upgrades = &mut borrow_global_mut(@aptos_framework).allow_upgrades; - *allow_upgrades = allowed; - } - - // - // Aggregatable coin functions - // - - /// Creates a new aggregatable coin with value overflowing on `limit`. Note that this function can - /// only be called by Aptos Framework (0x1) account for now because of `create_aggregator`. - public(friend) fun initialize_aggregatable_coin(aptos_framework: &signer): AggregatableCoin { - let aggregator = aggregator_factory::create_aggregator(aptos_framework, MAX_U64); - AggregatableCoin { - value: aggregator, - } - } - - /// Returns true if the value of aggregatable coin is zero. - public(friend) fun is_aggregatable_coin_zero(coin: &AggregatableCoin): bool { - let amount = aggregator::read(&coin.value); - amount == 0 - } - - /// Drains the aggregatable coin, setting it to zero and returning a standard coin. - public(friend) fun drain_aggregatable_coin(coin: &mut AggregatableCoin): Coin { - spec { - // TODO: The data invariant is not properly assumed from CollectedFeesPerBlock. - assume aggregator::spec_get_limit(coin.value) == MAX_U64; - }; - let amount = aggregator::read(&coin.value); - assert!(amount <= MAX_U64, error::out_of_range(EAGGREGATABLE_COIN_VALUE_TOO_LARGE)); - spec { - update aggregate_supply = aggregate_supply - amount; - }; - aggregator::sub(&mut coin.value, amount); - spec { - update supply = supply + amount; - }; - Coin { - value: (amount as u64), - } - } - - /// Merges `coin` into aggregatable coin (`dst_coin`). - public(friend) fun merge_aggregatable_coin( - dst_coin: &mut AggregatableCoin, - coin: Coin - ) { - spec { - update supply = supply - coin.value; - }; - let Coin { value } = coin; - let amount = (value as u128); - spec { - update aggregate_supply = aggregate_supply + amount; - }; - aggregator::add(&mut dst_coin.value, amount); - } - - /// Collects a specified amount of coin form an account into aggregatable coin. 
- public(friend) fun collect_into_aggregatable_coin( - account_addr: address, - amount: u64, - dst_coin: &mut AggregatableCoin, - ) acquires CoinStore, CoinConversionMap, CoinInfo, PairedCoinType { - // Skip collecting if amount is zero. - if (amount == 0) { - return - }; - - let (coin_amount_to_collect, fa_amount_to_collect) = calculate_amount_to_withdraw( - account_addr, - amount - ); - let coin = if (coin_amount_to_collect > 0) { - let coin_store = borrow_global_mut>(account_addr); - extract(&mut coin_store.coin, coin_amount_to_collect) - } else { - zero() - }; - if (fa_amount_to_collect > 0) { - let store_addr = primary_fungible_store::primary_store_address( - account_addr, - option::destroy_some(paired_metadata()) - ); - let fa = fungible_asset::withdraw_internal(store_addr, fa_amount_to_collect); - merge(&mut coin, fungible_asset_to_coin(fa)); - }; - merge_aggregatable_coin(dst_coin, coin); + public fun allow_supply_upgrades(_aptos_framework: &signer, _allowed: bool) { + abort error::invalid_state(ECOIN_SUPPLY_UPGRADE_NOT_SUPPORTED) } inline fun calculate_amount_to_withdraw( @@ -667,42 +581,57 @@ module aptos_framework::coin { if (!features::coin_to_fungible_asset_migration_feature_enabled()) { abort error::unavailable(ECOIN_TO_FUNGIBLE_ASSET_FEATURE_NOT_ENABLED) }; - assert!(is_coin_initialized(), error::invalid_argument(ECOIN_INFO_NOT_PUBLISHED)); - - let metadata = ensure_paired_metadata(); - let store = primary_fungible_store::ensure_primary_store_exists(account, metadata); - let store_address = object::object_address(&store); if (exists>(account)) { - let CoinStore { coin, frozen, deposit_events, withdraw_events } = move_from>( - account - ); - event::emit( - CoinEventHandleDeletion { + let CoinStore { coin, frozen, deposit_events, withdraw_events } = + move_from>(account); + if (is_coin_initialized()) { + let metadata = ensure_paired_metadata(); + let store = primary_fungible_store::ensure_primary_store_exists(account, metadata); + + event::emit(CoinStoreDeletion { + coin_type: type_info::type_name(), event_handle_creation_address: guid::creator_address( event::guid(&deposit_events) ), deleted_deposit_event_handle_creation_number: guid::creation_num(event::guid(&deposit_events)), deleted_withdraw_event_handle_creation_number: guid::creation_num(event::guid(&withdraw_events)) + }); + + if (coin.value == 0) { + destroy_zero(coin); + } else { + fungible_asset::unchecked_deposit_with_no_events( + object_address(&store), + coin_to_fungible_asset(coin) + ); + }; + + // Note: + // It is possible the primary fungible store may already exist before this function call. + // In this case, if the account owns a frozen CoinStore and an unfrozen primary fungible store, this + // function would convert and deposit the rest coin into the primary store and freeze it to make the + // `frozen` semantic as consistent as possible. + if (frozen != fungible_asset::is_frozen(store)) { + fungible_asset::set_frozen_flag_internal(store, frozen); } - ); - event::destroy_handle(deposit_events); - event::destroy_handle(withdraw_events); - if (coin.value == 0) { - destroy_zero(coin); } else { - fungible_asset::deposit(store, coin_to_fungible_asset(coin)); + destroy_zero(coin); }; - // Note: - // It is possible the primary fungible store may already exist before this function call. 
- // In this case, if the account owns a frozen CoinStore and an unfrozen primary fungible store, this - // function would convert and deposit the rest coin into the primary store and freeze it to make the - // `frozen` semantic as consistent as possible. - if (frozen != fungible_asset::is_frozen(store)) { - fungible_asset::set_frozen_flag_internal(store, frozen); - } + event::destroy_handle(deposit_events); + event::destroy_handle(withdraw_events); }; - if (!exists(store_address)) { - move_to(&create_signer::create_signer(store_address), MigrationFlag {}); + } + + inline fun assert_signer_has_permission(account: &signer) { + if(permissioned_signer::is_permissioned_signer(account)) { + fungible_asset::withdraw_permission_check_by_address( + account, + primary_fungible_store::primary_store_address( + signer::address_of(account), + ensure_paired_metadata() + ), + 0 + ); } } @@ -710,7 +639,20 @@ module aptos_framework::coin { public entry fun migrate_to_fungible_store( account: &signer ) acquires CoinStore, CoinConversionMap, CoinInfo { - maybe_convert_to_fungible_store(signer::address_of(account)); + let account_addr = signer::address_of(account); + assert_signer_has_permission(account); + maybe_convert_to_fungible_store(account_addr); + } + + /// Migrate to fungible store for `CoinType` if not yet. + public entry fun migrate_coin_store_to_fungible_store( + accounts: vector
+ ) acquires CoinStore, CoinConversionMap, CoinInfo { + if (features::new_accounts_default_to_fa_store_enabled() || features::new_accounts_default_to_fa_apt_store_enabled()) { + std::vector::for_each(accounts, |account| { + maybe_convert_to_fungible_store(account); + }); + } } // @@ -772,7 +714,7 @@ module aptos_framework::coin { /// Returns `true` is account_addr has frozen the CoinStore or if it's not registered at all public fun is_coin_store_frozen( account_addr: address - ): bool acquires CoinStore, CoinConversionMap { + ): bool acquires CoinStore, CoinConversionMap, CoinInfo { if (!is_account_registered(account_addr)) { return true }; @@ -783,15 +725,13 @@ module aptos_framework::coin { #[view] /// Returns `true` if `account_addr` is registered to receive `CoinType`. - public fun is_account_registered(account_addr: address): bool acquires CoinConversionMap { + public fun is_account_registered(account_addr: address): bool acquires CoinConversionMap, CoinInfo { assert!(is_coin_initialized(), error::invalid_argument(ECOIN_INFO_NOT_PUBLISHED)); if (exists>(account_addr)) { true } else { - let paired_metadata_opt = paired_metadata(); - (option::is_some( - &paired_metadata_opt - ) && migrated_primary_fungible_store_exists(account_addr, option::destroy_some(paired_metadata_opt))) + let paired_metadata = ensure_paired_metadata(); + can_receive_paired_fungible_asset(account_addr, paired_metadata) } } @@ -886,6 +826,34 @@ module aptos_framework::coin { }; } + public(friend) fun burn_from_for_gas( + account_addr: address, + amount: u64, + burn_cap: &BurnCapability, + ) acquires CoinInfo, CoinStore, CoinConversionMap, PairedFungibleAssetRefs { + // Skip burning if amount is zero. This shouldn't error out as it's called as part of transaction fee burning. + if (amount == 0) { + return + }; + + let (coin_amount_to_burn, fa_amount_to_burn) = calculate_amount_to_withdraw( + account_addr, + amount + ); + if (coin_amount_to_burn > 0) { + let coin_store = borrow_global_mut>(account_addr); + let coin_to_burn = extract(&mut coin_store.coin, coin_amount_to_burn); + burn(coin_to_burn, burn_cap); + }; + if (fa_amount_to_burn > 0) { + fungible_asset::address_burn_from_for_gas( + borrow_paired_burn_ref(burn_cap), + primary_fungible_store::primary_store_address(account_addr, option::destroy_some(paired_metadata())), + fa_amount_to_burn + ); + }; + } + /// Deposit the coin balance into the recipient's account and emit an event. 
public fun deposit( account_addr: address, @@ -897,22 +865,14 @@ module aptos_framework::coin { !coin_store.frozen, error::permission_denied(EFROZEN), ); - if (std::features::module_event_migration_enabled()) { - event::emit( - CoinDeposit { coin_type: type_name(), account: account_addr, amount: coin.value } + event::emit_event( + &mut coin_store.deposit_events, + DepositEvent { amount: coin.value }, ); - }; - event::emit_event( - &mut coin_store.deposit_events, - DepositEvent { amount: coin.value }, - ); merge(&mut coin_store.coin, coin); } else { - let metadata = paired_metadata(); - if (option::is_some(&metadata) && migrated_primary_fungible_store_exists( - account_addr, - option::destroy_some(metadata) - )) { + let metadata = ensure_paired_metadata(); + if (can_receive_paired_fungible_asset( account_addr, metadata)) { primary_fungible_store::deposit(account_addr, coin_to_fungible_asset(coin)); } else { abort error::not_found(ECOIN_STORE_NOT_PUBLISHED) @@ -920,20 +880,39 @@ module aptos_framework::coin { } } - inline fun migrated_primary_fungible_store_exists( + public fun deposit_with_signer( + account: &signer, + coin: Coin + ) acquires CoinStore, CoinConversionMap, CoinInfo { + let metadata = ensure_paired_metadata(); + let account_address = signer::address_of(account); + fungible_asset::refill_permission( + account, + coin.value, + primary_fungible_store::primary_store_address_inlined( + account_address, + metadata, + ) + ); + deposit(account_address, coin); + } + + inline fun can_receive_paired_fungible_asset( account_address: address, metadata: Object ): bool { - let primary_store_address = primary_fungible_store::primary_store_address(account_address, metadata); - fungible_asset::store_exists(primary_store_address) && ( - // migration flag is needed, until we start defaulting new accounts to APT PFS - features::new_accounts_default_to_fa_apt_store_enabled() || exists(primary_store_address) - ) + features::new_accounts_default_to_fa_store_enabled() || (features::new_accounts_default_to_fa_apt_store_enabled() && object::object_address(&metadata) == @0xa) || { + let primary_store_address = primary_fungible_store::primary_store_address( + account_address, + metadata + ); + fungible_asset::store_exists(primary_store_address) + } } /// Deposit the coin balance into the recipient's account without checking if the account is frozen. /// This is for internal use only and doesn't emit an DepositEvent. 
- public(friend) fun force_deposit( + public(friend) fun deposit_for_gas_fee( account_addr: address, coin: Coin ) acquires CoinStore, CoinConversionMap, CoinInfo { @@ -941,15 +920,15 @@ module aptos_framework::coin { let coin_store = borrow_global_mut>(account_addr); merge(&mut coin_store.coin, coin); } else { - let metadata = paired_metadata(); - if (option::is_some(&metadata) && migrated_primary_fungible_store_exists( + let metadata = ensure_paired_metadata(); + if (can_receive_paired_fungible_asset( account_addr, - option::destroy_some(metadata) + metadata )) { let fa = coin_to_fungible_asset(coin); let metadata = fungible_asset::asset_metadata(&fa); - let store = primary_fungible_store::primary_store(account_addr, metadata); - fungible_asset::deposit_internal(object::object_address(&store), fa); + let store = primary_fungible_store::ensure_primary_store_exists(account_addr, metadata); + fungible_asset::unchecked_deposit_with_no_events(object::object_address(&store), fa); } else { abort error::not_found(ECOIN_STORE_NOT_PUBLISHED) } @@ -1015,30 +994,8 @@ module aptos_framework::coin { /// Upgrade total supply to use a parallelizable implementation if it is /// available. - public entry fun upgrade_supply(account: &signer) acquires CoinInfo, SupplyConfig { - let account_addr = signer::address_of(account); - - // Only coin creators can upgrade total supply. - assert!( - coin_address() == account_addr, - error::invalid_argument(ECOIN_INFO_ADDRESS_MISMATCH), - ); - - // Can only succeed once on-chain governance agreed on the upgrade. - assert!( - borrow_global_mut(@aptos_framework).allow_upgrades, - error::permission_denied(ECOIN_SUPPLY_UPGRADE_NOT_SUPPORTED) - ); - - let maybe_supply = &mut borrow_global_mut>(account_addr).supply; - if (option::is_some(maybe_supply)) { - let supply = option::borrow_mut(maybe_supply); - - // If supply is tracked and the current implementation uses an integer - upgrade. - if (!optional_aggregator::is_parallelizable(supply)) { - optional_aggregator::switch(supply); - } - } + public entry fun upgrade_supply(_account: &signer) { + abort error::invalid_state(ECOIN_SUPPLY_UPGRADE_NOT_SUPPORTED) } /// Creates a new Coin with given `CoinType` and returns minting/freezing/burning capabilities. 
@@ -1050,7 +1007,7 @@ module aptos_framework::coin { symbol: string::String, decimals: u8, monitor_supply: bool, - ): (BurnCapability, FreezeCapability, MintCapability) { + ): (BurnCapability, FreezeCapability, MintCapability) acquires CoinInfo, CoinConversionMap { initialize_internal(account, name, symbol, decimals, monitor_supply, false) } @@ -1061,7 +1018,7 @@ module aptos_framework::coin { symbol: string::String, decimals: u8, monitor_supply: bool, - ): (BurnCapability, FreezeCapability, MintCapability) { + ): (BurnCapability, FreezeCapability, MintCapability) acquires CoinInfo, CoinConversionMap { system_addresses::assert_aptos_framework(account); initialize_internal(account, name, symbol, decimals, monitor_supply, true) } @@ -1073,8 +1030,9 @@ module aptos_framework::coin { decimals: u8, monitor_supply: bool, parallelizable: bool, - ): (BurnCapability, FreezeCapability, MintCapability) { + ): (BurnCapability, FreezeCapability, MintCapability) acquires CoinInfo, CoinConversionMap { let account_addr = signer::address_of(account); + assert_signer_has_permission(account); assert!( coin_address() == account_addr, @@ -1088,6 +1046,7 @@ module aptos_framework::coin { assert!(string::length(&name) <= MAX_COIN_NAME_LENGTH, error::invalid_argument(ECOIN_NAME_TOO_LONG)); assert!(string::length(&symbol) <= MAX_COIN_SYMBOL_LENGTH, error::invalid_argument(ECOIN_SYMBOL_TOO_LONG)); + assert!(decimals <= MAX_DECIMALS, error::invalid_argument(ECOIN_DECIMALS_TOO_LARGE)); let coin_info = CoinInfo { name, @@ -1095,7 +1054,7 @@ module aptos_framework::coin { decimals, supply: if (monitor_supply) { option::some( - optional_aggregator::new(MAX_U128, parallelizable) + optional_aggregator::new(parallelizable) ) } else { option::none() }, }; @@ -1130,8 +1089,9 @@ module aptos_framework::coin { mint_internal(amount) } - public fun register(account: &signer) acquires CoinConversionMap { + public fun register(account: &signer) acquires CoinInfo, CoinConversionMap { let account_addr = signer::address_of(account); + assert_signer_has_permission(account); // Short-circuit and do nothing if account is already registered for CoinType. if (is_account_registered(account_addr)) { return @@ -1162,51 +1122,6 @@ module aptos_framework::coin { coin.value } - /// Withdraws a specifed `amount` of coin `CoinType` from the specified `account`. - /// @param account The account from which to withdraw the coin. - /// @param amount The amount of coin to withdraw. 
- public(friend) fun withdraw_from( - account_addr: address, - amount: u64 - ): Coin acquires CoinStore, CoinConversionMap, CoinInfo, PairedCoinType { - - let (coin_amount_to_withdraw, fa_amount_to_withdraw) = calculate_amount_to_withdraw( - account_addr, - amount - ); - let withdrawn_coin = if (coin_amount_to_withdraw > 0) { - let coin_store = borrow_global_mut>(account_addr); - assert!( - !coin_store.frozen, - error::permission_denied(EFROZEN), - ); - if (std::features::module_event_migration_enabled()) { - event::emit( - CoinWithdraw { - coin_type: type_name(), account: account_addr, amount: coin_amount_to_withdraw - } - ); - }; - event::emit_event( - &mut coin_store.withdraw_events, - WithdrawEvent { amount: coin_amount_to_withdraw }, - ); - extract(&mut coin_store.coin, coin_amount_to_withdraw) - } else { - zero() - }; - if (fa_amount_to_withdraw > 0) { - let store_addr = primary_fungible_store::primary_store_address( - account_addr, - option::destroy_some(paired_metadata()) - ); - let fa = fungible_asset::withdraw_internal(store_addr, fa_amount_to_withdraw); - merge(&mut withdrawn_coin, fungible_asset_to_coin(fa)); - }; - - withdrawn_coin - } - /// Withdraw specified `amount` of coin `CoinType` from the signing account. public fun withdraw( account: &signer, @@ -1219,18 +1134,22 @@ module aptos_framework::coin { amount ); let withdrawn_coin = if (coin_amount_to_withdraw > 0) { + let metadata = ensure_paired_metadata(); + if(permissioned_signer::is_permissioned_signer(account)) { + // Perform the check only if the account is a permissioned signer to save the cost of + // computing the primary store location. + fungible_asset::withdraw_permission_check_by_address( + account, + primary_fungible_store::primary_store_address(account_addr, metadata), + coin_amount_to_withdraw + ); + }; + let coin_store = borrow_global_mut>(account_addr); assert!( !coin_store.frozen, error::permission_denied(EFROZEN), ); - if (std::features::module_event_migration_enabled()) { - event::emit( - CoinWithdraw { - coin_type: type_name(), account: account_addr, amount: coin_amount_to_withdraw - } - ); - }; event::emit_event( &mut coin_store.withdraw_events, WithdrawEvent { amount: coin_amount_to_withdraw }, @@ -1318,6 +1237,9 @@ module aptos_framework::coin { amount } + #[test_only] + use aptos_framework::aggregator; + #[test_only] struct FakeMoney {} @@ -1359,7 +1281,7 @@ module aptos_framework::coin { account: &signer, decimals: u8, monitor_supply: bool, - ): (BurnCapability, FreezeCapability, MintCapability) { + ): (BurnCapability, FreezeCapability, MintCapability) acquires CoinInfo, CoinConversionMap { aggregator_factory::initialize_aggregator_factory_for_test(account); initialize( account, @@ -1375,7 +1297,7 @@ module aptos_framework::coin { account: &signer, decimals: u8, monitor_supply: bool, - ): (BurnCapability, FreezeCapability, MintCapability) { + ): (BurnCapability, FreezeCapability, MintCapability) acquires CoinInfo, CoinConversionMap { let (burn_cap, freeze_cap, mint_cap) = initialize_fake_money( account, decimals, @@ -1434,7 +1356,6 @@ module aptos_framework::coin { deposit(source_addr, coins_minted); maybe_convert_to_fungible_store(source_addr); assert!(!coin_store_exists(source_addr), 0); - assert!(coin_store_exists(destination_addr), 0); transfer(&source, destination_addr, 50); maybe_convert_to_fungible_store(destination_addr); @@ -1492,7 +1413,7 @@ module aptos_framework::coin { #[test(source = @0x2, framework = @aptos_framework)] #[expected_failure(abort_code = 0x10001, location = Self)] - 
public fun fail_initialize(source: signer, framework: signer) { + public fun fail_initialize(source: signer, framework: signer) acquires CoinInfo, CoinConversionMap { aggregator_factory::initialize_aggregator_factory_for_test(&framework); let (burn_cap, freeze_cap, mint_cap) = initialize( &source, @@ -1589,7 +1510,7 @@ module aptos_framework::coin { #[expected_failure(abort_code = 0x10007, location = Self)] public fun test_destroy_non_zero( source: signer, - ) acquires CoinInfo { + ) acquires CoinInfo, CoinConversionMap { account::create_account_for_test(signer::address_of(&source)); let (burn_cap, freeze_cap, mint_cap) = initialize_and_register_fake_money(&source, 1, true); let coins_minted = mint(100, &mint_cap); @@ -1629,7 +1550,7 @@ module aptos_framework::coin { } #[test(source = @0x1)] - public fun test_is_coin_initialized(source: signer) { + public fun test_is_coin_initialized(source: signer) acquires CoinInfo, CoinConversionMap { assert!(!is_coin_initialized(), 0); let (burn_cap, freeze_cap, mint_cap) = initialize_fake_money(&source, 1, true); @@ -1755,7 +1676,7 @@ module aptos_framework::coin { } #[test_only] - fun initialize_with_aggregator(account: &signer) { + fun initialize_with_aggregator(account: &signer) acquires CoinInfo, CoinConversionMap { let (burn_cap, freeze_cap, mint_cap) = initialize_with_parallelizable_supply( account, string::utf8(b"Fake money"), @@ -1771,7 +1692,7 @@ module aptos_framework::coin { } #[test_only] - fun initialize_with_integer(account: &signer) { + fun initialize_with_integer(account: &signer) acquires CoinInfo, CoinConversionMap { let (burn_cap, freeze_cap, mint_cap) = initialize( account, string::utf8(b"Fake money"), @@ -1789,25 +1710,24 @@ module aptos_framework::coin { #[test(framework = @aptos_framework, other = @0x123)] #[expected_failure(abort_code = 0x50003, location = aptos_framework::system_addresses)] - fun test_supply_initialize_fails(framework: signer, other: signer) { + fun test_supply_initialize_fails(framework: signer, other: signer) acquires CoinInfo, CoinConversionMap { aggregator_factory::initialize_aggregator_factory_for_test(&framework); initialize_with_aggregator(&other); } #[test(other = @0x123)] #[expected_failure(abort_code = 0x10003, location = Self)] - fun test_create_coin_store_with_non_coin_type(other: signer) acquires CoinConversionMap { + fun test_create_coin_store_with_non_coin_type(other: signer) acquires CoinInfo, CoinConversionMap { register(&other); } #[test(other = @0x123)] - #[expected_failure(abort_code = 0x10003, location = Self)] fun test_migration_coin_store_with_non_coin_type(other: signer) acquires CoinConversionMap, CoinStore, CoinInfo { migrate_to_fungible_store(&other); } #[test(framework = @aptos_framework)] - fun test_supply_initialize(framework: signer) acquires CoinInfo { + fun test_supply_initialize(framework: signer) acquires CoinInfo, CoinConversionMap { aggregator_factory::initialize_aggregator_factory_for_test(&framework); initialize_with_aggregator(&framework); @@ -1823,9 +1743,13 @@ module aptos_framework::coin { assert!(optional_aggregator::read(supply) == 1000, 0); } + #[test_only] + /// Maximum possible coin supply. 
+ const MAX_U128: u128 = 340282366920938463463374607431768211455; + #[test(framework = @aptos_framework)] #[expected_failure(abort_code = 0x20001, location = aptos_framework::aggregator)] - fun test_supply_overflow(framework: signer) acquires CoinInfo { + fun test_supply_overflow(framework: signer) acquires CoinInfo, CoinConversionMap { aggregator_factory::initialize_aggregator_factory_for_test(&framework); initialize_with_aggregator(&framework); @@ -1837,101 +1761,12 @@ module aptos_framework::coin { optional_aggregator::sub(supply, 1); } - #[test(framework = @aptos_framework)] - #[expected_failure(abort_code = 0x5000B, location = aptos_framework::coin)] - fun test_supply_upgrade_fails(framework: signer) acquires CoinInfo, SupplyConfig { - initialize_supply_config(&framework); - aggregator_factory::initialize_aggregator_factory_for_test(&framework); - initialize_with_integer(&framework); - - let maybe_supply = &mut borrow_global_mut>(coin_address()).supply; - let supply = option::borrow_mut(maybe_supply); - - // Supply should be non-parallelizable. - assert!(!optional_aggregator::is_parallelizable(supply), 0); - - optional_aggregator::add(supply, 100); - optional_aggregator::sub(supply, 50); - optional_aggregator::add(supply, 950); - assert!(optional_aggregator::read(supply) == 1000, 0); - - upgrade_supply(&framework); - } - - #[test(framework = @aptos_framework)] - fun test_supply_upgrade(framework: signer) acquires CoinInfo, SupplyConfig { - initialize_supply_config(&framework); - aggregator_factory::initialize_aggregator_factory_for_test(&framework); - initialize_with_integer(&framework); - - // Ensure we have a non-parellelizable non-zero supply. - let maybe_supply = &mut borrow_global_mut>(coin_address()).supply; - let supply = option::borrow_mut(maybe_supply); - assert!(!optional_aggregator::is_parallelizable(supply), 0); - optional_aggregator::add(supply, 100); - - // Upgrade. - allow_supply_upgrades(&framework, true); - upgrade_supply(&framework); - - // Check supply again. - let maybe_supply = &mut borrow_global_mut>(coin_address()).supply; - let supply = option::borrow_mut(maybe_supply); - assert!(optional_aggregator::is_parallelizable(supply), 0); - assert!(optional_aggregator::read(supply) == 100, 0); - } - #[test_only] fun destroy_aggregatable_coin_for_test(aggregatable_coin: AggregatableCoin) { let AggregatableCoin { value } = aggregatable_coin; aggregator::destroy(value); } - #[test(framework = @aptos_framework)] - public entry fun test_collect_from_and_drain( - framework: signer, - ) acquires CoinInfo, CoinStore, CoinConversionMap, PairedCoinType { - let framework_addr = signer::address_of(&framework); - account::create_account_for_test(framework_addr); - let (burn_cap, freeze_cap, mint_cap) = initialize_and_register_fake_money(&framework, 1, true); - - // Collect from coin store only. - let coins_minted = mint(100, &mint_cap); - deposit(framework_addr, coins_minted); - let aggregatable_coin = initialize_aggregatable_coin(&framework); - collect_into_aggregatable_coin(framework_addr, 50, &mut aggregatable_coin); - - let fa_minted = coin_to_fungible_asset(mint(100, &mint_cap)); - primary_fungible_store::deposit(framework_addr, fa_minted); - assert!(balance(framework_addr) == 150, 0); - assert!(*option::borrow(&supply()) == 200, 0); - - // Collect from coin store and fungible store. 
- collect_into_aggregatable_coin(framework_addr, 100, &mut aggregatable_coin); - - assert!(balance(framework_addr) == 50, 0); - maybe_convert_to_fungible_store(framework_addr); - // Collect from fungible store only. - collect_into_aggregatable_coin(framework_addr, 30, &mut aggregatable_coin); - - // Check that aggregatable coin has the right amount. - let collected_coin = drain_aggregatable_coin(&mut aggregatable_coin); - assert!(is_aggregatable_coin_zero(&aggregatable_coin), 0); - assert!(value(&collected_coin) == 180, 0); - - // Supply of coins should be unchanged, but the balance on the account should decrease. - assert!(balance(framework_addr) == 20, 0); - assert!(*option::borrow(&supply()) == 200, 0); - - burn(collected_coin, &burn_cap); - destroy_aggregatable_coin_for_test(aggregatable_coin); - move_to(&framework, FakeMoneyCapabilities { - burn_cap, - freeze_cap, - mint_cap, - }); - } - #[test_only] fun deposit_to_coin_store(account_addr: address, coin: Coin) acquires CoinStore { assert!( @@ -1944,7 +1779,6 @@ module aptos_framework::coin { !coin_store.frozen, error::permission_denied(EFROZEN), ); - event::emit_event( &mut coin_store.deposit_events, DepositEvent { amount: coin.value }, @@ -2126,6 +1960,7 @@ module aptos_framework::coin { let (mint_ref, mint_ref_receipt) = get_paired_mint_ref(&mint_cap); let (burn_ref, burn_ref_receipt) = get_paired_burn_ref(&burn_cap); let fungible_asset = fungible_asset::mint(&mint_ref, 50); + assert!(option::is_none(&fungible_asset::maximum(ensure_paired_metadata())), 0); assert!(supply() == option::some(150), 0); assert!(coin_supply() == option::some(100), 0); assert!(fungible_asset::supply(ensure_paired_metadata()) == option::some(50), 0); @@ -2151,12 +1986,11 @@ module aptos_framework::coin { } #[test(account = @aptos_framework, aaron = @0xaa10, bob = @0xb0b)] - #[expected_failure(abort_code = 0x60005, location = Self)] fun test_force_deposit( account: &signer, aaron: &signer, bob: &signer - ) acquires CoinConversionMap, CoinInfo, CoinStore { + ) acquires CoinConversionMap, CoinInfo, CoinStore, PairedFungibleAssetRefs { let account_addr = signer::address_of(account); let aaron_addr = signer::address_of(aaron); let bob_addr = signer::address_of(bob); @@ -2164,17 +1998,36 @@ module aptos_framework::coin { account::create_account_for_test(aaron_addr); account::create_account_for_test(bob_addr); let (burn_cap, freeze_cap, mint_cap) = initialize_and_register_fake_money(account, 1, true); + + assert!(event::emitted_events().length() == 0, 10); + assert!(event::emitted_events().length() == 0, 10); + maybe_convert_to_fungible_store(aaron_addr); + maybe_convert_to_fungible_store(bob_addr); + + assert!(event::emitted_events().length() == 0, 10); deposit(aaron_addr, mint(1, &mint_cap)); + assert!(event::emitted_events().length() == 1, 10); - force_deposit(account_addr, mint(100, &mint_cap)); - force_deposit(aaron_addr, mint(50, &mint_cap)); + deposit_for_gas_fee(account_addr, mint(100, &mint_cap)); + assert!(event::emitted_events().length() == 1, 10); + + deposit_for_gas_fee(aaron_addr, mint(50, &mint_cap)); + assert!(event::emitted_events().length() == 1, 10); assert!( primary_fungible_store::balance(aaron_addr, option::extract(&mut paired_metadata())) == 51, 0 ); assert!(coin_balance(account_addr) == 100, 0); - force_deposit(bob_addr, mint(1, &mint_cap)); + deposit_for_gas_fee(bob_addr, mint(1, &mint_cap)); + assert!(event::emitted_events().length() == 1, 10); + + assert!(event::emitted_events().length() == 0, 10); + burn_from_for_gas(aaron_addr, 1, 
&burn_cap); + assert!(event::emitted_events().length() == 0, 10); + burn_from(aaron_addr, 1, &burn_cap); + assert!(event::emitted_events().length() == 1, 10); + move_to(account, FakeMoneyCapabilities { burn_cap, freeze_cap, @@ -2182,46 +2035,36 @@ module aptos_framework::coin { }); } - #[test(account = @aptos_framework, aaron = @0xaa10, bob = @0xb0b)] + #[test(account = @aptos_framework, bob = @0xb0b)] fun test_is_account_registered( account: &signer, - aaron: &signer, bob: &signer, ) acquires CoinConversionMap, CoinInfo, CoinStore { let account_addr = signer::address_of(account); - let aaron_addr = signer::address_of(aaron); let bob_addr = signer::address_of(bob); account::create_account_for_test(account_addr); - account::create_account_for_test(aaron_addr); account::create_account_for_test(bob_addr); + let apt_fa_feature = features::get_new_accounts_default_to_fa_apt_store_feature(); + let fa_feature = features::get_new_accounts_default_to_fa_store_feature(); + features::change_feature_flags_for_testing(account, vector[], vector[apt_fa_feature, fa_feature]); let (burn_cap, freeze_cap, mint_cap) = initialize_and_register_fake_money(account, 1, true); assert!(coin_store_exists(account_addr), 0); assert!(is_account_registered(account_addr), 0); - assert!(!coin_store_exists(aaron_addr), 0); - assert!(!is_account_registered(aaron_addr), 0); - - maybe_convert_to_fungible_store(aaron_addr); - let coin = mint(100, &mint_cap); - deposit(aaron_addr, coin); - - assert!(!coin_store_exists(aaron_addr), 0); - assert!(is_account_registered(aaron_addr), 0); + register(bob); + assert!(coin_store_exists(bob_addr), 0); + maybe_convert_to_fungible_store(bob_addr); + assert!(!coin_store_exists(bob_addr), 0); + register(bob); + assert!(!coin_store_exists(bob_addr), 0); maybe_convert_to_fungible_store(account_addr); assert!(!coin_store_exists(account_addr), 0); assert!(is_account_registered(account_addr), 0); - // Deposit FA to bob to created primary fungible store without `MigrationFlag`. 
primary_fungible_store::deposit(bob_addr, coin_to_fungible_asset(mint(100, &mint_cap))); assert!(!coin_store_exists(bob_addr), 0); - register(bob); - assert!(coin_store_exists(bob_addr), 0); - maybe_convert_to_fungible_store(bob_addr); - assert!(!coin_store_exists(bob_addr), 0); - register(bob); - assert!(!coin_store_exists(bob_addr), 0); move_to(account, FakeMoneyCapabilities { burn_cap, @@ -2230,7 +2073,7 @@ module aptos_framework::coin { }); } - #[test(account = @aptos_framework, aaron = @0xaa10)] + #[test(account = @aptos_framework)] fun test_migration_with_existing_primary_fungible_store( account: &signer, ) acquires CoinConversionMap, CoinInfo, CoinStore, PairedCoinType { @@ -2243,9 +2086,8 @@ module aptos_framework::coin { assert!(coin_balance(account_addr) == 0, 0); assert!(balance(account_addr) == 100, 0); let coin = withdraw(account, 50); - assert!(!migrated_primary_fungible_store_exists(account_addr, ensure_paired_metadata()), 0); + assert!(can_receive_paired_fungible_asset(account_addr, ensure_paired_metadata()), 0); maybe_convert_to_fungible_store(account_addr); - assert!(migrated_primary_fungible_store_exists(account_addr, ensure_paired_metadata()), 0); deposit(account_addr, coin); assert!(coin_balance(account_addr) == 0, 0); assert!(balance(account_addr) == 100, 0); @@ -2256,4 +2098,229 @@ module aptos_framework::coin { mint_cap, }); } + + #[deprecated] + #[resource_group_member(group = aptos_framework::object::ObjectGroup)] + /// The flag the existence of which indicates the primary fungible store is created by the migration from CoinStore. + struct MigrationFlag has key {} + + #[test(account = @aptos_framework)] + #[expected_failure(abort_code = 0x50024, location = aptos_framework::fungible_asset)] + fun test_withdraw_with_permissioned_signer_no_migration( + account: &signer, + ) acquires CoinConversionMap, CoinInfo, CoinStore, PairedCoinType { + account::create_account_for_test(signer::address_of(account)); + let account_addr = signer::address_of(account); + let (burn_cap, freeze_cap, mint_cap) = initialize_fake_money(account, 1, true); + create_coin_store(account); + create_coin_conversion_map(account); + + let coin = mint(100, &mint_cap); + deposit(account_addr, coin); + + let permissioned_handle = permissioned_signer::create_permissioned_handle(account); + let permissioned_signer = permissioned_signer::signer_from_permissioned_handle(&permissioned_handle); + + // Withdraw from permissioned signer with no migration rules set + // + // Aborted with error. 
+ let coin_2 = withdraw(&permissioned_signer, 10); + permissioned_signer::destroy_permissioned_handle(permissioned_handle); + + burn(coin_2, &burn_cap); + move_to(account, FakeMoneyCapabilities { + burn_cap, + freeze_cap, + mint_cap, + }); + } + + #[test(account = @aptos_framework)] + #[expected_failure(abort_code = 0x50024, location = aptos_framework::fungible_asset)] + fun test_withdraw_with_permissioned_signer( + account: &signer, + ) acquires CoinConversionMap, CoinInfo, CoinStore, PairedCoinType { + account::create_account_for_test(signer::address_of(account)); + let account_addr = signer::address_of(account); + let (burn_cap, freeze_cap, mint_cap) = initialize_fake_money(account, 1, true); + create_coin_store(account); + create_coin_conversion_map(account); + + let coin = mint(100, &mint_cap); + deposit(account_addr, coin); + + let permissioned_handle = permissioned_signer::create_permissioned_handle(account); + let permissioned_signer = permissioned_signer::signer_from_permissioned_handle(&permissioned_handle); + + // Withdraw from permissioned signer with no migration rules set + // + // Aborted with error. + let coin_2 = withdraw(&permissioned_signer, 10); + permissioned_signer::destroy_permissioned_handle(permissioned_handle); + + burn(coin_2, &burn_cap); + move_to(account, FakeMoneyCapabilities { + burn_cap, + freeze_cap, + mint_cap, + }); + } + + #[test(account = @aptos_framework)] + #[expected_failure(abort_code = 0x50024, location = aptos_framework::fungible_asset)] + fun test_withdraw_with_permissioned_signer_no_capacity( + account: &signer, + ) acquires CoinConversionMap, CoinInfo, CoinStore, PairedCoinType { + account::create_account_for_test(signer::address_of(account)); + let account_addr = signer::address_of(account); + let (burn_cap, freeze_cap, mint_cap) = initialize_and_register_fake_money(account, 1, true); + ensure_paired_metadata(); + + let coin = mint(100, &mint_cap); + deposit(account_addr, coin); + + let permissioned_handle = permissioned_signer::create_permissioned_handle(account); + let permissioned_signer = permissioned_signer::signer_from_permissioned_handle(&permissioned_handle); + + // Withdraw from permissioned signer with no permissions granted. + let coin_2 = withdraw(&permissioned_signer, 10); + permissioned_signer::destroy_permissioned_handle(permissioned_handle); + + burn(coin_2, &burn_cap); + move_to(account, FakeMoneyCapabilities { + burn_cap, + freeze_cap, + mint_cap, + }); + } + + #[test(account = @aptos_framework)] + fun test_e2e_withdraw_with_permissioned_signer_and_migration( + account: &signer, + ) acquires CoinConversionMap, CoinInfo, CoinStore, PairedCoinType { + account::create_account_for_test(signer::address_of(account)); + let account_addr = signer::address_of(account); + let (burn_cap, freeze_cap, mint_cap) = initialize_and_register_fake_money(account, 1, true); + let metadata = ensure_paired_metadata(); + + let coin = mint(100, &mint_cap); + deposit(account_addr, coin); + + let permissioned_handle = permissioned_signer::create_permissioned_handle(account); + let permissioned_signer = permissioned_signer::signer_from_permissioned_handle(&permissioned_handle); + primary_fungible_store::grant_permission(account, &permissioned_signer, metadata, 10); + + // Withdraw from permissioned signer with proper permissions. + let coin_2 = withdraw(&permissioned_signer, 10); + burn(coin_2, &burn_cap); + + // Withdraw with some funds from CoinStore and some from PFS. 
+ primary_fungible_store::deposit(account_addr, coin_to_fungible_asset(mint(100, &mint_cap))); + primary_fungible_store::grant_permission(account, &permissioned_signer, metadata, 100); + let coin_2 = withdraw(&permissioned_signer, 100); + burn(coin_2, &burn_cap); + + // Withdraw funds from PFS only. + assert!(coin_balance(account_addr) == 0, 1); + primary_fungible_store::grant_permission(account, &permissioned_signer, metadata, 10); + let coin_2 = withdraw(&permissioned_signer, 10); + burn(coin_2, &burn_cap); + + permissioned_signer::destroy_permissioned_handle(permissioned_handle); + move_to(account, FakeMoneyCapabilities { + burn_cap, + freeze_cap, + mint_cap, + }); + } + + #[test(account = @aptos_framework)] + #[expected_failure(abort_code = 0x50024, location = aptos_framework::fungible_asset)] + fun test_e2e_withdraw_with_permissioned_signer_no_permission_1( + account: &signer, + ) acquires CoinConversionMap, CoinInfo, CoinStore, PairedCoinType { + account::create_account_for_test(signer::address_of(account)); + let account_addr = signer::address_of(account); + let (burn_cap, freeze_cap, mint_cap) = initialize_and_register_fake_money(account, 1, true); + let metadata = ensure_paired_metadata(); + + let coin = mint(100, &mint_cap); + deposit(account_addr, coin); + + let permissioned_handle = permissioned_signer::create_permissioned_handle(account); + let permissioned_signer = permissioned_signer::signer_from_permissioned_handle(&permissioned_handle); + primary_fungible_store::grant_permission(account, &permissioned_signer, metadata, 10); + + let coin_2 = withdraw(&permissioned_signer, 20); + burn(coin_2, &burn_cap); + + permissioned_signer::destroy_permissioned_handle(permissioned_handle); + move_to(account, FakeMoneyCapabilities { + burn_cap, + freeze_cap, + mint_cap, + }); + } + + #[test(account = @aptos_framework)] + #[expected_failure(abort_code = 0x50024, location = aptos_framework::fungible_asset)] + fun test_e2e_withdraw_with_permissioned_signer_no_permission_2( + account: &signer, + ) acquires CoinConversionMap, CoinInfo, CoinStore, PairedCoinType { + account::create_account_for_test(signer::address_of(account)); + let account_addr = signer::address_of(account); + let (burn_cap, freeze_cap, mint_cap) = initialize_and_register_fake_money(account, 1, true); + let metadata = ensure_paired_metadata(); + + let coin = mint(100, &mint_cap); + deposit(account_addr, coin); + + let permissioned_handle = permissioned_signer::create_permissioned_handle(account); + let permissioned_signer = permissioned_signer::signer_from_permissioned_handle(&permissioned_handle); + primary_fungible_store::grant_permission(account, &permissioned_signer, metadata, 10); + + // Withdraw from permissioned signer with proper permissions. + let coin_2 = withdraw(&permissioned_signer, 10); + burn(coin_2, &burn_cap); + + // Withdraw with some funds from CoinStore and some from PFS. 
+ primary_fungible_store::deposit(account_addr, coin_to_fungible_asset(mint(100, &mint_cap))); + primary_fungible_store::grant_permission(account, &permissioned_signer, metadata, 90); + let coin_2 = withdraw(&permissioned_signer, 100); + burn(coin_2, &burn_cap); + + permissioned_signer::destroy_permissioned_handle(permissioned_handle); + move_to(account, FakeMoneyCapabilities { + burn_cap, + freeze_cap, + mint_cap, + }); + } + + #[test(account = @aptos_framework)] + #[expected_failure(abort_code = 0x50024, location = aptos_framework::fungible_asset)] + fun test_e2e_withdraw_with_permissioned_signer_no_permission_3( + account: &signer, + ) acquires CoinConversionMap, CoinInfo, CoinStore, PairedCoinType { + account::create_account_for_test(signer::address_of(account)); + let account_addr = signer::address_of(account); + let (burn_cap, freeze_cap, mint_cap) = initialize_and_register_fake_money(account, 1, true); + let metadata = ensure_paired_metadata(); + + let permissioned_handle = permissioned_signer::create_permissioned_handle(account); + let permissioned_signer = permissioned_signer::signer_from_permissioned_handle(&permissioned_handle); + + // Withdraw with some funds from PFS only. + primary_fungible_store::deposit(account_addr, coin_to_fungible_asset(mint(100, &mint_cap))); + primary_fungible_store::grant_permission(account, &permissioned_signer, metadata, 90); + let coin_2 = withdraw(&permissioned_signer, 100); + burn(coin_2, &burn_cap); + + permissioned_signer::destroy_permissioned_handle(permissioned_handle); + move_to(account, FakeMoneyCapabilities { + burn_cap, + freeze_cap, + mint_cap, + }); + } } diff --git a/aptos-move/framework/aptos-framework/sources/coin.spec.move b/aptos-move/framework/aptos-framework/sources/coin.spec.move index 2564bc0daa8c6..bd126cd117e8e 100644 --- a/aptos-move/framework/aptos-framework/sources/coin.spec.move +++ b/aptos-move/framework/aptos-framework/sources/coin.spec.move @@ -60,6 +60,7 @@ spec aptos_framework::coin { /// spec module { pragma verify = true; + pragma aborts_if_is_partial; global supply: num; global aggregate_supply: num; apply TotalSupplyTracked to * except @@ -119,24 +120,9 @@ spec aptos_framework::coin { ensures [abstract] result == type_info::type_of().account_address; } - /// Can only be initialized once. - /// Can only be published by reserved addresses. - spec initialize_supply_config(aptos_framework: &signer) { - let aptos_addr = signer::address_of(aptos_framework); - aborts_if !system_addresses::is_aptos_framework_address(aptos_addr); - aborts_if exists(aptos_addr); - ensures !global(aptos_addr).allow_upgrades; - ensures exists(aptos_addr); - } - /// Can only be updated by `@aptos_framework`. 
- spec allow_supply_upgrades(aptos_framework: &signer, allowed: bool) { - modifies global(@aptos_framework); - let aptos_addr = signer::address_of(aptos_framework); - aborts_if !system_addresses::is_aptos_framework_address(aptos_addr); - aborts_if !exists(aptos_addr); - let post allow_upgrades_post = global(@aptos_framework); - ensures allow_upgrades_post.allow_upgrades == allowed; + spec allow_supply_upgrades(_aptos_framework: &signer, _allowed: bool) { + aborts_if true; } spec balance(owner: address): u64 { @@ -174,11 +160,12 @@ spec aptos_framework::coin { } } - spec fun spec_is_account_registered(account_addr: address): bool { - let paired_metadata_opt = spec_paired_metadata(); - exists>(account_addr) || (option::spec_is_some( - paired_metadata_opt - ) && primary_fungible_store::spec_primary_store_exists(account_addr, option::spec_borrow(paired_metadata_opt))) + spec fun spec_is_account_registered(account_addr:address): bool; + + spec is_account_registered(account_addr: address): bool { + pragma aborts_if_is_partial; + aborts_if false; + ensures [abstract] result == spec_is_account_registered(account_addr); } spec schema CoinSubAbortsIf { @@ -338,7 +325,7 @@ spec aptos_framework::coin { aborts_if coin_store.frozen; } - spec force_deposit(account_addr: address, coin: Coin) { + spec deposit_for_gas_fee(account_addr: address, coin: Coin) { // TODO(fa_migration) pragma verify = false; modifies global>(account_addr); @@ -394,32 +381,8 @@ spec aptos_framework::coin { /// The creator of `CoinType` must be `@aptos_framework`. /// `SupplyConfig` allow upgrade. - spec upgrade_supply(account: &signer) { - let account_addr = signer::address_of(account); - let coin_address = type_info::type_of().account_address; - aborts_if coin_address != account_addr; - aborts_if !exists(@aptos_framework); - /// [high-level-req-1.1] - aborts_if !exists>(account_addr); - - let supply_config = global(@aptos_framework); - aborts_if !supply_config.allow_upgrades; - modifies global>(account_addr); - - let maybe_supply = global>(account_addr).supply; - let supply = option::spec_borrow(maybe_supply); - let value = optional_aggregator::optional_aggregator_value(supply); - - let post post_maybe_supply = global>(account_addr).supply; - let post post_supply = option::spec_borrow(post_maybe_supply); - let post post_value = optional_aggregator::optional_aggregator_value(post_supply); - - let supply_no_parallel = option::spec_is_some(maybe_supply) && - !optional_aggregator::is_parallelizable(supply); - - aborts_if supply_no_parallel && !exists(@aptos_framework); - ensures supply_no_parallel ==> - optional_aggregator::is_parallelizable(post_supply) && post_value == value; + spec upgrade_supply(_account: &signer) { + aborts_if true; } spec initialize { @@ -578,46 +541,4 @@ spec aptos_framework::coin { aborts_if coin_store.frozen; aborts_if balance < amount; } - - spec initialize_aggregatable_coin(aptos_framework: &signer): AggregatableCoin { - include system_addresses::AbortsIfNotAptosFramework { account: aptos_framework }; - include aggregator_factory::CreateAggregatorInternalAbortsIf; - } - - spec is_aggregatable_coin_zero(coin: &AggregatableCoin): bool { - aborts_if false; - ensures result == (aggregator::spec_read(coin.value) == 0); - } - - spec drain_aggregatable_coin(coin: &mut AggregatableCoin): Coin { - aborts_if aggregator::spec_read(coin.value) > MAX_U64; - ensures result.value == aggregator::spec_aggregator_get_val(old(coin).value); - } - - spec merge_aggregatable_coin(dst_coin: &mut AggregatableCoin, coin: Coin) { - 
let aggr = dst_coin.value; - let post p_aggr = dst_coin.value; - aborts_if aggregator::spec_aggregator_get_val(aggr) - + coin.value > aggregator::spec_get_limit(aggr); - aborts_if aggregator::spec_aggregator_get_val(aggr) - + coin.value > MAX_U128; - ensures aggregator::spec_aggregator_get_val(aggr) + coin.value == aggregator::spec_aggregator_get_val(p_aggr); - } - - spec collect_into_aggregatable_coin(account_addr: address, amount: u64, dst_coin: &mut AggregatableCoin) { - // TODO(fa_migration) - pragma verify = false; - let aggr = dst_coin.value; - let post p_aggr = dst_coin.value; - let coin_store = global>(account_addr); - let post p_coin_store = global>(account_addr); - aborts_if amount > 0 && !exists>(account_addr); - aborts_if amount > 0 && coin_store.coin.value < amount; - aborts_if amount > 0 && aggregator::spec_aggregator_get_val(aggr) - + amount > aggregator::spec_get_limit(aggr); - aborts_if amount > 0 && aggregator::spec_aggregator_get_val(aggr) - + amount > MAX_U128; - ensures aggregator::spec_aggregator_get_val(aggr) + amount == aggregator::spec_aggregator_get_val(p_aggr); - ensures coin_store.coin.value - amount == p_coin_store.coin.value; - } } diff --git a/aptos-move/framework/aptos-framework/sources/configs/config_buffer.move b/aptos-move/framework/aptos-framework/sources/configs/config_buffer.move index bbcba84263540..36214ab943557 100644 --- a/aptos-move/framework/aptos-framework/sources/configs/config_buffer.move +++ b/aptos-move/framework/aptos-framework/sources/configs/config_buffer.move @@ -10,6 +10,7 @@ /// /// NOTE: on-chain config `0x1::state::ValidatorSet` implemented its own buffer. module aptos_framework::config_buffer { + use std::error; use std::string::String; use aptos_std::any; use aptos_std::any::Any; @@ -32,6 +33,9 @@ module aptos_framework::config_buffer { /// Config buffer operations failed with permission denied. const ESTD_SIGNER_NEEDED: u64 = 1; + /// Function is deprecated. + const EDEPRECATED: u64 = 2; + struct PendingConfigs has key { configs: SimpleMap, } @@ -65,11 +69,17 @@ module aptos_framework::config_buffer { simple_map::upsert(&mut configs.configs, key, value); } + #[deprecated] + /// Use `extract_v2` instead. + public fun extract(): T { + abort(error::unavailable(EDEPRECATED)) + } + /// Take the buffered config `T` out (buffer cleared). Abort if the buffer is empty. /// Should only be used at the end of a reconfiguration. /// /// Typically used in `X::on_new_epoch()` where X is an on-chaon config. - public fun extract(): T acquires PendingConfigs { + public(friend) fun extract_v2(): T acquires PendingConfigs { let configs = borrow_global_mut(@aptos_framework); let key = type_info::type_name(); let (_, value_packed) = simple_map::remove(&mut configs.configs, &key); @@ -94,7 +104,7 @@ module aptos_framework::config_buffer { // Update and extract should work. 
upsert(DummyConfig { data: 999 }); assert!(does_exist(), 1); - let config = extract(); + let config = extract_v2(); assert!(config == DummyConfig { data: 999 }, 1); assert!(!does_exist(), 1); } diff --git a/aptos-move/framework/aptos-framework/sources/configs/config_buffer.spec.move b/aptos-move/framework/aptos-framework/sources/configs/config_buffer.spec.move index 26e80269eecf5..f04a83920d464 100644 --- a/aptos-move/framework/aptos-framework/sources/configs/config_buffer.spec.move +++ b/aptos-move/framework/aptos-framework/sources/configs/config_buffer.spec.move @@ -22,7 +22,7 @@ spec aptos_framework::config_buffer { aborts_if !exists(@aptos_framework); } - spec extract(): T { + spec extract_v2(): T { aborts_if !exists(@aptos_framework); include ExtractAbortsIf; } @@ -32,7 +32,7 @@ spec aptos_framework::config_buffer { let key = type_info::type_name(); aborts_if !simple_map::spec_contains_key(configs.configs, key); include any::UnpackAbortsIf { - x: simple_map::spec_get(configs.configs, key) + self: simple_map::spec_get(configs.configs, key) }; } @@ -51,7 +51,7 @@ spec aptos_framework::config_buffer { let configs = global(@aptos_framework); // TODO(#12015) include spec_fun_does_exist(type_name) ==> any::UnpackAbortsIf { - x: simple_map::spec_get(configs.configs, type_name) + self: simple_map::spec_get(configs.configs, type_name) }; } @@ -61,7 +61,7 @@ spec aptos_framework::config_buffer { let configs = global(@aptos_framework); // TODO(#12015) include spec_fun_does_exist(type_name) ==> any::UnpackRequirement { - x: simple_map::spec_get(configs.configs, type_name) + self: simple_map::spec_get(configs.configs, type_name) }; } diff --git a/aptos-move/framework/aptos-framework/sources/configs/consensus_config.move b/aptos-move/framework/aptos-framework/sources/configs/consensus_config.move index e81049477c8c9..ca7049d3cebd1 100644 --- a/aptos-move/framework/aptos-framework/sources/configs/consensus_config.move +++ b/aptos-move/framework/aptos-framework/sources/configs/consensus_config.move @@ -59,7 +59,7 @@ module aptos_framework::consensus_config { public(friend) fun on_new_epoch(framework: &signer) acquires ConsensusConfig { system_addresses::assert_aptos_framework(framework); if (config_buffer::does_exist()) { - let new_config = config_buffer::extract(); + let new_config = config_buffer::extract_v2(); if (exists(@aptos_framework)) { *borrow_global_mut(@aptos_framework) = new_config; } else { diff --git a/aptos-move/framework/aptos-framework/sources/configs/consensus_config.spec.move b/aptos-move/framework/aptos-framework/sources/configs/consensus_config.spec.move index f0360989f47a6..182c19faa46d1 100644 --- a/aptos-move/framework/aptos-framework/sources/configs/consensus_config.spec.move +++ b/aptos-move/framework/aptos-framework/sources/configs/consensus_config.spec.move @@ -47,15 +47,12 @@ spec aptos_framework::consensus_config { use aptos_framework::chain_status; use aptos_framework::timestamp; use std::signer; - use aptos_framework::stake; use aptos_framework::coin::CoinInfo; use aptos_framework::aptos_coin::AptosCoin; - use aptos_framework::transaction_fee; use aptos_framework::staking_config; // TODO: set because of timeout (property proved) pragma verify_duration_estimate = 600; - include transaction_fee::RequiresCollectedFeesPerValueLeqBlockAptosSupply; include staking_config::StakingRewardsConfigRequirement; let addr = signer::address_of(account); /// [high-level-req-2] @@ -66,7 +63,6 @@ spec aptos_framework::consensus_config { requires chain_status::is_genesis(); requires 
timestamp::spec_now_microseconds() >= reconfiguration::last_reconfiguration_time(); - requires exists(@aptos_framework); requires exists>(@aptos_framework); ensures global(@aptos_framework).config == config; } diff --git a/aptos-move/framework/aptos-framework/sources/configs/execution_config.move b/aptos-move/framework/aptos-framework/sources/configs/execution_config.move index 6322a6cfe1420..de945c39ac7c7 100644 --- a/aptos-move/framework/aptos-framework/sources/configs/execution_config.move +++ b/aptos-move/framework/aptos-framework/sources/configs/execution_config.move @@ -55,7 +55,7 @@ module aptos_framework::execution_config { public(friend) fun on_new_epoch(framework: &signer) acquires ExecutionConfig { system_addresses::assert_aptos_framework(framework); if (config_buffer::does_exist()) { - let config = config_buffer::extract(); + let config = config_buffer::extract_v2(); if (exists(@aptos_framework)) { *borrow_global_mut(@aptos_framework) = config; } else { diff --git a/aptos-move/framework/aptos-framework/sources/configs/execution_config.spec.move b/aptos-move/framework/aptos-framework/sources/configs/execution_config.spec.move index 4b04d757f2fdb..381e550a92b06 100644 --- a/aptos-move/framework/aptos-framework/sources/configs/execution_config.spec.move +++ b/aptos-move/framework/aptos-framework/sources/configs/execution_config.spec.move @@ -10,18 +10,14 @@ spec aptos_framework::execution_config { use aptos_framework::timestamp; use std::signer; use std::features; - use aptos_framework::transaction_fee; use aptos_framework::chain_status; - use aptos_framework::stake; use aptos_framework::staking_config; use aptos_framework::aptos_coin; // TODO: set because of timeout (property proved) pragma verify_duration_estimate = 600; let addr = signer::address_of(account); - include transaction_fee::RequiresCollectedFeesPerValueLeqBlockAptosSupply; requires chain_status::is_genesis(); - requires exists(@aptos_framework); requires exists(@aptos_framework); requires len(config) > 0; include features::spec_periodical_reward_rate_decrease_enabled() ==> staking_config::StakingRewardsConfigEnabledRequirement; diff --git a/aptos-move/framework/aptos-framework/sources/configs/gas_schedule.move b/aptos-move/framework/aptos-framework/sources/configs/gas_schedule.move index 9156c1ae2574e..fa6e7f660469a 100644 --- a/aptos-move/framework/aptos-framework/sources/configs/gas_schedule.move +++ b/aptos-move/framework/aptos-framework/sources/configs/gas_schedule.move @@ -135,7 +135,7 @@ module aptos_framework::gas_schedule { public(friend) fun on_new_epoch(framework: &signer) acquires GasScheduleV2 { system_addresses::assert_aptos_framework(framework); if (config_buffer::does_exist()) { - let new_gas_schedule = config_buffer::extract(); + let new_gas_schedule = config_buffer::extract_v2(); if (exists(@aptos_framework)) { *borrow_global_mut(@aptos_framework) = new_gas_schedule; } else { diff --git a/aptos-move/framework/aptos-framework/sources/configs/gas_schedule.spec.move b/aptos-move/framework/aptos-framework/sources/configs/gas_schedule.spec.move index 7ce238f7b9959..ee57cd4c1c3c2 100644 --- a/aptos-move/framework/aptos-framework/sources/configs/gas_schedule.spec.move +++ b/aptos-move/framework/aptos-framework/sources/configs/gas_schedule.spec.move @@ -49,19 +49,15 @@ spec aptos_framework::gas_schedule { spec set_gas_schedule(aptos_framework: &signer, gas_schedule_blob: vector) { use std::signer; use aptos_framework::util; - use aptos_framework::stake; use aptos_framework::coin::CoinInfo; use 
aptos_framework::aptos_coin::AptosCoin; - use aptos_framework::transaction_fee; use aptos_framework::staking_config; use aptos_framework::chain_status; // TODO: set because of timeout (property proved) pragma verify_duration_estimate = 600; - requires exists(@aptos_framework); requires exists>(@aptos_framework); requires chain_status::is_genesis(); - include transaction_fee::RequiresCollectedFeesPerValueLeqBlockAptosSupply; include staking_config::StakingRewardsConfigRequirement; /// [high-level-req-2] @@ -77,18 +73,14 @@ spec aptos_framework::gas_schedule { } spec set_storage_gas_config(aptos_framework: &signer, config: StorageGasConfig) { - use aptos_framework::stake; use aptos_framework::coin::CoinInfo; use aptos_framework::aptos_coin::AptosCoin; - use aptos_framework::transaction_fee; use aptos_framework::staking_config; // TODO: set because of timeout (property proved). pragma verify_duration_estimate = 600; - requires exists(@aptos_framework); requires exists>(@aptos_framework); include system_addresses::AbortsIfNotAptosFramework{ account: aptos_framework }; - include transaction_fee::RequiresCollectedFeesPerValueLeqBlockAptosSupply; include staking_config::StakingRewardsConfigRequirement; aborts_if !exists(@aptos_framework); ensures global(@aptos_framework) == config; diff --git a/aptos-move/framework/aptos-framework/sources/configs/jwk_consensus_config.move b/aptos-move/framework/aptos-framework/sources/configs/jwk_consensus_config.move index bba0276e785d6..8d155dd6981d4 100644 --- a/aptos-move/framework/aptos-framework/sources/configs/jwk_consensus_config.move +++ b/aptos-move/framework/aptos-framework/sources/configs/jwk_consensus_config.move @@ -68,7 +68,7 @@ module aptos_framework::jwk_consensus_config { public(friend) fun on_new_epoch(framework: &signer) acquires JWKConsensusConfig { system_addresses::assert_aptos_framework(framework); if (config_buffer::does_exist()) { - let new_config = config_buffer::extract(); + let new_config = config_buffer::extract_v2(); if (exists(@aptos_framework)) { *borrow_global_mut(@aptos_framework) = new_config; } else { diff --git a/aptos-move/framework/aptos-framework/sources/configs/randomness_api_v0_config.move b/aptos-move/framework/aptos-framework/sources/configs/randomness_api_v0_config.move index 28466211d20bf..74a39a8a6b773 100644 --- a/aptos-move/framework/aptos-framework/sources/configs/randomness_api_v0_config.move +++ b/aptos-move/framework/aptos-framework/sources/configs/randomness_api_v0_config.move @@ -38,7 +38,7 @@ module aptos_framework::randomness_api_v0_config { public fun on_new_epoch(framework: &signer) acquires RequiredGasDeposit, AllowCustomMaxGasFlag { system_addresses::assert_aptos_framework(framework); if (config_buffer::does_exist()) { - let new_config = config_buffer::extract(); + let new_config = config_buffer::extract_v2(); if (exists(@aptos_framework)) { *borrow_global_mut(@aptos_framework) = new_config; } else { @@ -46,7 +46,7 @@ module aptos_framework::randomness_api_v0_config { } }; if (config_buffer::does_exist()) { - let new_config = config_buffer::extract(); + let new_config = config_buffer::extract_v2(); if (exists(@aptos_framework)) { *borrow_global_mut(@aptos_framework) = new_config; } else { diff --git a/aptos-move/framework/aptos-framework/sources/configs/randomness_config.move b/aptos-move/framework/aptos-framework/sources/configs/randomness_config.move index 24916393e8451..90246f1aedb22 100644 --- a/aptos-move/framework/aptos-framework/sources/configs/randomness_config.move +++ 
b/aptos-move/framework/aptos-framework/sources/configs/randomness_config.move @@ -59,7 +59,7 @@ module aptos_framework::randomness_config { public(friend) fun on_new_epoch(framework: &signer) acquires RandomnessConfig { system_addresses::assert_aptos_framework(framework); if (config_buffer::does_exist()) { - let new_config = config_buffer::extract(); + let new_config = config_buffer::extract_v2(); if (exists(@aptos_framework)) { *borrow_global_mut(@aptos_framework) = new_config; } else { diff --git a/aptos-move/framework/aptos-framework/sources/configs/randomness_config_seqnum.move b/aptos-move/framework/aptos-framework/sources/configs/randomness_config_seqnum.move index 174b7fdda8388..71e7e3d855390 100644 --- a/aptos-move/framework/aptos-framework/sources/configs/randomness_config_seqnum.move +++ b/aptos-move/framework/aptos-framework/sources/configs/randomness_config_seqnum.move @@ -38,7 +38,7 @@ module aptos_framework::randomness_config_seqnum { public(friend) fun on_new_epoch(framework: &signer) acquires RandomnessConfigSeqNum { system_addresses::assert_aptos_framework(framework); if (config_buffer::does_exist()) { - let new_config = config_buffer::extract(); + let new_config = config_buffer::extract_v2(); if (exists(@aptos_framework)) { *borrow_global_mut(@aptos_framework) = new_config; } else { diff --git a/aptos-move/framework/aptos-framework/sources/configs/staking_config.move b/aptos-move/framework/aptos-framework/sources/configs/staking_config.move index aff41b494f8c9..ecb8fbc1525a2 100644 --- a/aptos-move/framework/aptos-framework/sources/configs/staking_config.move +++ b/aptos-move/framework/aptos-framework/sources/configs/staking_config.move @@ -132,6 +132,17 @@ module aptos_framework::staking_config { rewards_rate_denominator, voting_power_increase_limit, }); + + // Initialize StakingRewardsConfig with the given rewards_rate and rewards_rate_denominator, + // while setting min_rewards_rate and rewards_rate_decrease_rate to 0. + initialize_rewards( + aptos_framework, + fixed_point64::create_from_rational((rewards_rate as u128), (rewards_rate_denominator as u128)), + fixed_point64::create_from_rational(0, 1000), + ONE_YEAR_IN_SECS, + 0, + fixed_point64::create_from_rational(0, 1000), + ); } #[view] @@ -395,7 +406,9 @@ module aptos_framework::staking_config { #[test(aptos_framework = @aptos_framework)] public entry fun test_change_staking_configs(aptos_framework: signer) acquires StakingConfig { - initialize(&aptos_framework, 0, 1, 1, false, 1, 1, 1); + initialize_for_test(&aptos_framework, 0, 1, 1, false, 1, 1, 1); + // This test case checks the behavior when the periodical_reward_rate_decrease feature is disabled. + features::change_feature_flags_for_testing(&aptos_framework, vector[], vector[features::get_periodical_reward_rate_decrease_feature()]); update_required_stake(&aptos_framework, 100, 1000); update_recurring_lockup_duration_secs(&aptos_framework, 10000); @@ -501,9 +514,11 @@ module aptos_framework::staking_config { update_recurring_lockup_duration_secs(&account, 1); } - #[test(account = @0x123)] + #[test(aptos_framework = @0x1, account = @0x123)] #[expected_failure(abort_code = 0x50003, location = aptos_framework::system_addresses)] - public entry fun test_update_rewards_unauthorized_should_fail(account: signer) acquires StakingConfig { + public entry fun test_update_rewards_unauthorized_should_fail(aptos_framework: signer, account: signer) acquires StakingConfig { + // This test case checks the behavior when the periodical_reward_rate_decrease feature is disabled. 
+ features::change_feature_flags_for_testing(&aptos_framework, vector[], vector[features::get_periodical_reward_rate_decrease_feature()]); update_rewards_rate(&account, 1, 10); } @@ -547,6 +562,8 @@ module aptos_framework::staking_config { #[test(aptos_framework = @aptos_framework)] #[expected_failure(abort_code = 0x10002, location = Self)] public entry fun test_update_rewards_invalid_denominator_should_fail(aptos_framework: signer) acquires StakingConfig { + // This test case checks the behavior when the periodical_reward_rate_decrease feature is disabled. + features::change_feature_flags_for_testing(&aptos_framework, vector[], vector[features::get_periodical_reward_rate_decrease_feature()]); update_rewards_rate(&aptos_framework, 1, 0); } @@ -625,6 +642,8 @@ module aptos_framework::staking_config { public entry fun test_update_voting_power_increase_limit_to_zero_should_fail( aptos_framework: signer ) acquires StakingConfig { + // This test case checks the behavior when the periodical_reward_rate_decrease feature is disabled. + features::change_feature_flags_for_testing(&aptos_framework, vector[], vector[features::get_periodical_reward_rate_decrease_feature()]); update_voting_power_increase_limit(&aptos_framework, 0); } diff --git a/aptos-move/framework/aptos-framework/sources/configs/staking_config.spec.move b/aptos-move/framework/aptos-framework/sources/configs/staking_config.spec.move index 270abef9cec86..881a8344628c2 100644 --- a/aptos-move/framework/aptos-framework/sources/configs/staking_config.spec.move +++ b/aptos-move/framework/aptos-framework/sources/configs/staking_config.spec.move @@ -58,6 +58,7 @@ spec aptos_framework::staking_config { spec module { use aptos_framework::chain_status; invariant [suspendable] chain_status::is_operating() ==> exists(@aptos_framework); + invariant [suspendable] chain_status::is_operating() ==> exists(@aptos_framework); pragma verify = true; pragma aborts_if_is_strict; } @@ -106,6 +107,7 @@ spec aptos_framework::staking_config { ) { use std::signer; let addr = signer::address_of(aptos_framework); + requires exists(@aptos_framework); /// [high-level-req-1.1] aborts_if addr != @aptos_framework; aborts_if minimum_stake > maximum_stake || maximum_stake == 0; @@ -117,7 +119,9 @@ spec aptos_framework::staking_config { aborts_if rewards_rate > MAX_REWARDS_RATE; aborts_if rewards_rate > rewards_rate_denominator; aborts_if exists(addr); + aborts_if exists(addr); ensures exists(addr); + ensures exists(addr); } /// Caller must be @aptos_framework. 
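The staking_config test hunks above all disable the periodical reward rate decrease feature before exercising the legacy rewards-rate path. A minimal sketch of that pattern, as it could sit next to the module's other tests (the test name is illustrative and not part of this diff; only calls shown in the hunks above are used):

    #[test(aptos_framework = @aptos_framework)]
    public entry fun test_update_rewards_rate_with_decrease_feature_disabled(aptos_framework: signer) acquires StakingConfig {
        initialize_for_test(&aptos_framework, 0, 1, 1, false, 1, 1, 1);
        // Disable periodical_reward_rate_decrease so update_rewards_rate takes the legacy path.
        features::change_feature_flags_for_testing(
            &aptos_framework,
            vector[],
            vector[features::get_periodical_reward_rate_decrease_feature()]
        );
        update_rewards_rate(&aptos_framework, 1, 10);
    }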
diff --git a/aptos-move/framework/aptos-framework/sources/configs/version.move b/aptos-move/framework/aptos-framework/sources/configs/version.move index fa90eb44ea8a0..6747c717c3884 100644 --- a/aptos-move/framework/aptos-framework/sources/configs/version.move +++ b/aptos-move/framework/aptos-framework/sources/configs/version.move @@ -67,7 +67,7 @@ module aptos_framework::version { public(friend) fun on_new_epoch(framework: &signer) acquires Version { system_addresses::assert_aptos_framework(framework); if (config_buffer::does_exist()) { - let new_value = config_buffer::extract(); + let new_value = config_buffer::extract_v2(); if (exists(@aptos_framework)) { *borrow_global_mut(@aptos_framework) = new_value; } else { diff --git a/aptos-move/framework/aptos-framework/sources/configs/version.spec.move b/aptos-move/framework/aptos-framework/sources/configs/version.spec.move index 5ce2685d1158b..76cb72c5fad68 100644 --- a/aptos-move/framework/aptos-framework/sources/configs/version.spec.move +++ b/aptos-move/framework/aptos-framework/sources/configs/version.spec.move @@ -26,20 +26,16 @@ spec aptos_framework::version { use std::signer; use aptos_framework::chain_status; use aptos_framework::timestamp; - use aptos_framework::stake; use aptos_framework::coin::CoinInfo; use aptos_framework::aptos_coin::AptosCoin; - use aptos_framework::transaction_fee; use aptos_framework::staking_config; use aptos_framework::reconfiguration; // TODO: set because of timeout (property proved) pragma verify_duration_estimate = 120; - include transaction_fee::RequiresCollectedFeesPerValueLeqBlockAptosSupply; include staking_config::StakingRewardsConfigRequirement; requires chain_status::is_genesis(); requires timestamp::spec_now_microseconds() >= reconfiguration::last_reconfiguration_time(); - requires exists(@aptos_framework); requires exists>(@aptos_framework); aborts_if !exists(signer::address_of(account)); diff --git a/aptos-move/framework/aptos-framework/sources/create_signer.move b/aptos-move/framework/aptos-framework/sources/create_signer.move index 3da0c50c904f0..8bea49055b469 100644 --- a/aptos-move/framework/aptos-framework/sources/create_signer.move +++ b/aptos-move/framework/aptos-framework/sources/create_signer.move @@ -14,8 +14,11 @@ module aptos_framework::create_signer { friend aptos_framework::coin; friend aptos_framework::fungible_asset; friend aptos_framework::genesis; + friend aptos_framework::account_abstraction; friend aptos_framework::multisig_account; friend aptos_framework::object; + friend aptos_framework::permissioned_signer; + friend aptos_framework::transaction_validation; public(friend) native fun create_signer(addr: address): signer; } diff --git a/aptos-move/framework/aptos-framework/sources/create_signer.spec.move b/aptos-move/framework/aptos-framework/sources/create_signer.spec.move index 1bb4c0ffa9fd6..dab59d30da2db 100644 --- a/aptos-move/framework/aptos-framework/sources/create_signer.spec.move +++ b/aptos-move/framework/aptos-framework/sources/create_signer.spec.move @@ -41,5 +41,8 @@ spec aptos_framework::create_signer { pragma opaque; aborts_if [abstract] false; ensures [abstract] signer::address_of(result) == addr; + ensures [abstract] result == spec_create_signer(addr); } + + spec fun spec_create_signer(addr: address): signer; } diff --git a/aptos-move/framework/aptos-framework/sources/datastructures/big_ordered_map.move b/aptos-move/framework/aptos-framework/sources/datastructures/big_ordered_map.move new file mode 100644 index 0000000000000..f30d8c1922bd1 --- /dev/null +++ 
b/aptos-move/framework/aptos-framework/sources/datastructures/big_ordered_map.move
@@ -0,0 +1,2274 @@
+/// This module provides an implementation for a big ordered map.
+/// Big means that it is stored across multiple resources, and doesn't have an
+/// upper limit on the number of elements it can contain.
+///
+/// Keys point to values, and each key in the map must be unique.
+///
+/// Currently, one implementation is provided - BPlusTreeMap, backed by a B+Tree,
+/// with each node being a separate resource, internally containing OrderedMap.
+///
+/// BPlusTreeMap is chosen since the biggest (performance and gas)
+/// costs are reading resources, and it:
+/// * reduces number of resource accesses
+/// * reduces number of rebalancing operations, and makes each rebalancing
+///   operation touch only a few resources
+/// * it allows for parallelism for keys that are not close to each other,
+///   once it contains enough keys
+///
+/// Note: Default configuration (used in `new_with_config(0, 0, false)`) allows for keys and values of up to 5KB,
+/// or 100 times the first (key, value), to satisfy general needs.
+/// If you need larger, use other constructor methods.
+/// Based on initial configuration, BigOrderedMap will always accept insertion of keys and values
+/// up to the allowed size, and will abort with EKEY_BYTES_TOO_LARGE or EARGUMENT_BYTES_TOO_LARGE.
+///
+/// TODO: all iterator functions are public(friend) for now, so that they can be modified in a
+/// backward incompatible way. Type is also named IteratorPtr, so that Iterator is free to use later.
+/// They are waiting for Move improvement that will allow references to be part of the struct,
+/// allowing cleaner iterator APIs.
+module aptos_std::big_ordered_map {
+    use std::error;
+    use std::vector;
+    use std::option::{Self as option, Option};
+    use std::bcs;
+    use aptos_std::ordered_map::{Self, OrderedMap};
+    use aptos_std::cmp;
+    use aptos_std::storage_slots_allocator::{Self, StorageSlotsAllocator, StoredSlot};
+    use aptos_std::math64::{max, min};
+
+    // Error constants shared with ordered_map (so try using same values)
+
+    /// Map key already exists
+    const EKEY_ALREADY_EXISTS: u64 = 1;
+    /// Map key is not found
+    const EKEY_NOT_FOUND: u64 = 2;
+    /// Trying to do an operation on an IteratorPtr that would go out of bounds
+    const EITER_OUT_OF_BOUNDS: u64 = 3;
+
+    // Error constants specific to big_ordered_map
+
+    /// The provided configuration parameter is invalid.
+    const EINVALID_CONFIG_PARAMETER: u64 = 11;
+    /// Map isn't empty
+    const EMAP_NOT_EMPTY: u64 = 12;
+    /// Trying to insert too large of a (key, value) into the map.
+    const EARGUMENT_BYTES_TOO_LARGE: u64 = 13;
+    /// borrow_mut requires that key and value types have constant size
+    /// (otherwise it wouldn't be able to guarantee size requirements are not violated)
+    /// Use remove() + add() combo instead.
+    const EBORROW_MUT_REQUIRES_CONSTANT_VALUE_SIZE: u64 = 14;
+    /// Trying to insert too large of a key into the map.
+    const EKEY_BYTES_TOO_LARGE: u64 = 15;
+
+    /// Cannot use new/new_with_reusable with variable-sized types.
+    /// Use `new_with_type_size_hints()` or `new_with_config()` instead if your types have variable sizes.
+    /// `new_with_config(0, 0, false)` tries to work reasonably well for variety of sizes
+    /// (allows keys or values of at least 5KB and 100x larger than the first inserted)
+    const ECANNOT_USE_NEW_WITH_VARIABLE_SIZED_TYPES: u64 = 16;
+
+    // Errors that should never be thrown
+
+    /// Internal errors.
+ const EINTERNAL_INVARIANT_BROKEN: u64 = 20; + + // Internal constants. + + // Bounds on degrees: + + /// Smallest allowed degree on inner nodes. + const INNER_MIN_DEGREE: u16 = 4; + /// Smallest allowed degree on leaf nodes. + /// + /// We rely on 1 being valid size only for root node, + /// so this cannot be below 3 (unless that is changed) + const LEAF_MIN_DEGREE: u16 = 3; + /// Largest degree allowed (both for inner and leaf nodes) + const MAX_DEGREE: u64 = 4096; + + // Bounds on serialized sizes: + + /// Largest size all keys for inner nodes or key-value pairs for leaf nodes can have. + /// Node itself can be a bit larger, due to few other accounting fields. + /// This is a bit conservative, a bit less than half of the resource limit (which is 1MB) + const MAX_NODE_BYTES: u64 = 400 * 1024; + /// Target node size, from efficiency perspective. + const DEFAULT_TARGET_NODE_SIZE: u64 = 4096; + + /// When using default constructors (new() / new_with_reusable() / new_with_config(0, 0, _)) + /// making sure key or value of this size (5KB) will be accepted, which should satisfy most cases + /// If you need keys/values that are larger, use other constructors. + const DEFAULT_MAX_KEY_OR_VALUE_SIZE: u64 = 5 * 1024; // 5KB + + /// Target max node size, when using hints (via new_with_type_size_hints). + /// Smaller than MAX_NODE_BYTES, to improve performence, as large nodes are innefficient. + const HINT_MAX_NODE_BYTES: u64 = 128 * 1024; + + // Constants aligned with storage_slots_allocator + const NULL_INDEX: u64 = 0; + const ROOT_INDEX: u64 = 1; + + /// A node of the BigOrderedMap. + /// + /// Inner node will have all children be Child::Inner, pointing to the child nodes. + /// Leaf node will have all children be Child::Leaf. + /// Basically - Leaf node is a single-resource OrderedMap, containing as much key/value entries, as can fit. + /// So Leaf node contains multiple values, not just one. + enum Node has store { + V1 { + // Whether this node is a leaf node. + is_leaf: bool, + // The children of the nodes. + // When node is inner node, K represents max_key within the child subtree, and values are Child::Inner. + // When the node is leaf node, K represents key of the leaf, and values are Child::Leaf. + children: OrderedMap>, + // The node index of its previous node at the same level, or `NULL_INDEX` if it doesn't have a previous node. + prev: u64, + // The node index of its next node at the same level, or `NULL_INDEX` if it doesn't have a next node. + next: u64, + } + } + + /// Contents of a child node. + enum Child has store { + Inner { + // The node index of it's child + node_index: StoredSlot, + }, + Leaf { + // Value associated with the leaf node. + value: V, + } + } + + /// An iterator to iterate all keys in the BigOrderedMap. + /// + /// TODO: Once fields can be (mutable) references, this class will be deprecated. + enum IteratorPtr has copy, drop { + End, + Some { + /// The node index of the iterator pointing to. + node_index: u64, + + /// Child iter it is pointing to + child_iter: ordered_map::IteratorPtr, + + /// `key` to which `(node_index, child_iter)` are pointing to + /// cache to not require borrowing global resources to fetch again + key: K, + }, + } + + /// The BigOrderedMap data structure. + enum BigOrderedMap has store { + BPlusTreeMap { + /// Root node. It is stored directly in the resource itself, unlike all other nodes. + root: Node, + /// Storage of all non-root nodes. They are stored in separate storage slots. 
+ nodes: StorageSlotsAllocator>, + /// The node index of the leftmost node. + min_leaf_index: u64, + /// The node index of the rightmost node. + max_leaf_index: u64, + + /// Whether Key and Value have constant serialized size, and if so, + /// optimize out size checks on every insert. + constant_kv_size: bool, + /// The max number of children an inner node can have. + inner_max_degree: u16, + /// The max number of children a leaf node can have. + leaf_max_degree: u16, + } + } + + // ======================= Constructors && Destructors ==================== + + /// Returns a new BigOrderedMap with the default configuration. + /// + /// Cannot be used with variable-sized types. + /// Use `new_with_type_size_hints()` or `new_with_config()` instead if your types have variable sizes. + /// `new_with_config(0, 0, false)` tries to work reasonably well for variety of sizes + /// (allows keys or values of at least 5KB and 100x larger than the first inserted) + public fun new(): BigOrderedMap { + assert!( + bcs::constant_serialized_size().is_some() && bcs::constant_serialized_size().is_some(), + error::invalid_argument(ECANNOT_USE_NEW_WITH_VARIABLE_SIZED_TYPES) + ); + new_with_config(0, 0, false) + } + + /// Returns a new BigOrderedMap with with reusable storage slots. + /// + /// Cannot be used with variable-sized types. + /// Use `new_with_type_size_hints()` or `new_with_config()` instead if your types have variable sizes. + /// `new_with_config(0, 0, false)` tries to work reasonably well for variety of sizes + /// (allows keys or values of at least 5KB and 100x larger than the first inserted) + public fun new_with_reusable(): BigOrderedMap { + assert!( + bcs::constant_serialized_size().is_some() && bcs::constant_serialized_size().is_some(), + error::invalid_argument(ECANNOT_USE_NEW_WITH_VARIABLE_SIZED_TYPES) + ); + new_with_config(0, 0, true) + } + + /// Returns a new BigOrderedMap, configured based on passed key and value serialized size hints. + public fun new_with_type_size_hints(avg_key_bytes: u64, max_key_bytes: u64, avg_value_bytes: u64, max_value_bytes: u64): BigOrderedMap { + assert!(avg_key_bytes <= max_key_bytes, error::invalid_argument(EINVALID_CONFIG_PARAMETER)); + assert!(avg_value_bytes <= max_value_bytes, error::invalid_argument(EINVALID_CONFIG_PARAMETER)); + + let inner_max_degree_from_avg = max(min(MAX_DEGREE, DEFAULT_TARGET_NODE_SIZE / avg_key_bytes), INNER_MIN_DEGREE as u64); + let inner_max_degree_from_max = HINT_MAX_NODE_BYTES / max_key_bytes; + assert!(inner_max_degree_from_max >= (INNER_MIN_DEGREE as u64), error::invalid_argument(EINVALID_CONFIG_PARAMETER)); + + let avg_entry_size = avg_key_bytes + avg_value_bytes; + let max_entry_size = max_key_bytes + max_value_bytes; + + let leaf_max_degree_from_avg = max(min(MAX_DEGREE, DEFAULT_TARGET_NODE_SIZE / avg_entry_size), LEAF_MIN_DEGREE as u64); + let leaf_max_degree_from_max = HINT_MAX_NODE_BYTES / max_entry_size; + assert!(leaf_max_degree_from_max >= (INNER_MIN_DEGREE as u64), error::invalid_argument(EINVALID_CONFIG_PARAMETER)); + + new_with_config( + min(inner_max_degree_from_avg, inner_max_degree_from_max) as u16, + min(leaf_max_degree_from_avg, leaf_max_degree_from_max) as u16, + false, + ) + } + + /// Returns a new BigOrderedMap with the provided max degree consts (the maximum # of children a node can have, both inner and leaf). + /// + /// If 0 is passed, then it is dynamically computed based on size of first key and value. 
+    /// With 0 it is configured to accept keys and values up to 5KB in size,
+    /// or as large as 100x the size of the first insert. (100 = MAX_NODE_BYTES / DEFAULT_TARGET_NODE_SIZE)
+    ///
+    /// Sizes of all elements must respect (or their additions will be rejected):
+    ///   `key_size * inner_max_degree <= MAX_NODE_BYTES`
+    ///   `entry_size * leaf_max_degree <= MAX_NODE_BYTES`
+    /// If keys or values have variable size, and the first element could be non-representative in size (i.e. smaller than future ones),
+    /// it is important to compute and pass inner_max_degree and leaf_max_degree based on the largest element you want to be able to insert.
+    ///
+    /// `reuse_slots` means that removing elements from the map doesn't free the storage slots and returns the refund.
+    /// Together with `allocate_spare_slots`, it allows you to preallocate slots and have inserts with predictable gas costs.
+    /// (otherwise, inserts that require the map to add new nodes cost significantly more compared to the rest)
+    public fun new_with_config(inner_max_degree: u16, leaf_max_degree: u16, reuse_slots: bool): BigOrderedMap {
+        assert!(inner_max_degree == 0 || (inner_max_degree >= INNER_MIN_DEGREE && (inner_max_degree as u64) <= MAX_DEGREE), error::invalid_argument(EINVALID_CONFIG_PARAMETER));
+        assert!(leaf_max_degree == 0 || (leaf_max_degree >= LEAF_MIN_DEGREE && (leaf_max_degree as u64) <= MAX_DEGREE), error::invalid_argument(EINVALID_CONFIG_PARAMETER));
+
+        // Assert that storage_slots_allocator special indices are aligned:
+        assert!(storage_slots_allocator::is_null_index(NULL_INDEX), error::invalid_state(EINTERNAL_INVARIANT_BROKEN));
+        assert!(storage_slots_allocator::is_special_unused_index(ROOT_INDEX), error::invalid_state(EINTERNAL_INVARIANT_BROKEN));
+
+        let nodes = storage_slots_allocator::new(reuse_slots);
+
+        let self = BigOrderedMap::BPlusTreeMap {
+            root: new_node(/*is_leaf=*/true),
+            nodes: nodes,
+            min_leaf_index: ROOT_INDEX,
+            max_leaf_index: ROOT_INDEX,
+            constant_kv_size: false, // Will be initialized in validate_static_size_and_init_max_degrees below.
+            inner_max_degree: inner_max_degree,
+            leaf_max_degree: leaf_max_degree
+        };
+        self.validate_static_size_and_init_max_degrees();
+        self
+    }
+
+    /// Create a BigOrderedMap from a vector of keys and values, with default configuration.
+    /// Aborts with EKEY_ALREADY_EXISTS if duplicate keys are passed in.
+    public fun new_from(keys: vector, values: vector): BigOrderedMap {
+        let map = new();
+        map.add_all(keys, values);
+        map
+    }
+
+    /// Destroys the map if it's empty, otherwise aborts.
+    public fun destroy_empty(self: BigOrderedMap) {
+        let BigOrderedMap::BPlusTreeMap { root, nodes, min_leaf_index: _, max_leaf_index: _, constant_kv_size: _, inner_max_degree: _, leaf_max_degree: _ } = self;
+        root.destroy_empty_node();
+        // If root node is empty, then we know that no storage slots are used,
+        // and so we can safely destroy all nodes.
+        nodes.destroy_empty();
+    }
+
+    /// If the map was created with reuse_slots=true, you can allocate spare slots to pay the storage fee now,
+    /// allowing future insertions to not require any storage slot creation - making their gas more predictable
+    /// and better bounded/fair.
+    /// (otherwise, unlucky inserts create new storage slots and are charged more for it)
+    public fun allocate_spare_slots(self: &mut BigOrderedMap, num_to_allocate: u64) {
+        self.nodes.allocate_spare_slots(num_to_allocate)
+    }
+
+    /// Returns true iff the BigOrderedMap is empty.
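    /// An illustrative usage sketch (not part of this diff; the `u64` type arguments and literal
    /// values are assumptions), showing construction, insertion and emptiness checks:
    ///     let map = big_ordered_map::new_with_config<u64, u64>(0, 0, false);
    ///     assert!(map.is_empty(), 0);
    ///     let key = 1;
    ///     map.add(key, 10);
    ///     assert!(!map.is_empty(), 0);
    ///     assert!(map.remove(&key) == 10, 0);
    ///     map.destroy_empty();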
+ public fun is_empty(self: &BigOrderedMap): bool { + let node = self.borrow_node(self.min_leaf_index); + node.children.is_empty() + } + + /// Returns the number of elements in the BigOrderedMap. + /// This is an expensive function, as it goes through all the leaves to compute it. + public fun compute_length(self: &BigOrderedMap): u64 { + let size = 0; + self.for_each_leaf_node_ref(|node| { + size += node.children.length(); + }); + size + } + + // ======================= Section with Modifiers ========================= + + /// Inserts the key/value into the BigOrderedMap. + /// Aborts if the key is already in the map. + public fun add(self: &mut BigOrderedMap, key: K, value: V) { + self.add_or_upsert_impl(key, value, false).destroy_none() + } + + /// If the key doesn't exist in the map, inserts the key/value, and returns none. + /// Otherwise updates the value under the given key, and returns the old value. + public fun upsert(self: &mut BigOrderedMap, key: K, value: V): Option { + let result = self.add_or_upsert_impl(key, value, true); + if (result.is_some()) { + let Child::Leaf { + value: old_value, + } = result.destroy_some(); + option::some(old_value) + } else { + result.destroy_none(); + option::none() + } + } + + /// Removes the entry from BigOrderedMap and returns the value which `key` maps to. + /// Aborts if there is no entry for `key`. + public fun remove(self: &mut BigOrderedMap, key: &K): V { + // Optimize case where only root node exists + // (optimizes out borrowing and path creation in `find_leaf_path`) + if (self.root.is_leaf) { + let Child::Leaf { + value, + } = self.root.children.remove(key); + return value; + }; + + let path_to_leaf = self.find_leaf_path(key); + + assert!(!path_to_leaf.is_empty(), error::invalid_argument(EKEY_NOT_FOUND)); + + let Child::Leaf { + value, + } = self.remove_at(path_to_leaf, key); + value + } + + /// Add multiple key/value pairs to the map. The keys must not already exist. + /// Aborts with EKEY_ALREADY_EXISTS if key already exist, or duplicate keys are passed in. + public fun add_all(self: &mut BigOrderedMap, keys: vector, values: vector) { + // TODO: Can be optimized, both in insertion order (largest first, then from smallest), + // as well as on initializing inner_max_degree/leaf_max_degree better + keys.zip(values, |key, value| { + self.add(key, value); + }); + } + + public fun pop_front(self: &mut BigOrderedMap): (K, V) { + let it = self.new_begin_iter(); + let k = *it.iter_borrow_key(); + let v = self.remove(&k); + (k, v) + } + + public fun pop_back(self: &mut BigOrderedMap): (K, V) { + let it = self.new_end_iter().iter_prev(self); + let k = *it.iter_borrow_key(); + let v = self.remove(&k); + (k, v) + } + + // ============================= Accessors ================================ + + /// Returns an iterator pointing to the first element that is greater or equal to the provided + /// key, or an end iterator if such element doesn't exist. 
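    /// Illustrative behaviour (hypothetical values; this is a friend-only API): in a map holding the
    /// keys 1, 5 and 9, `lower_bound(&5)` and `lower_bound(&4)` both point at key 5, while
    /// `lower_bound(&10)` returns the end iterator.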
+ public(friend) fun lower_bound(self: &BigOrderedMap, key: &K): IteratorPtr { + let leaf = self.find_leaf(key); + if (leaf == NULL_INDEX) { + return self.new_end_iter() + }; + + let node = self.borrow_node(leaf); + assert!(node.is_leaf, error::invalid_state(EINTERNAL_INVARIANT_BROKEN)); + + let child_lower_bound = node.children.lower_bound(key); + if (child_lower_bound.iter_is_end(&node.children)) { + self.new_end_iter() + } else { + let iter_key = *child_lower_bound.iter_borrow_key(&node.children); + new_iter(leaf, child_lower_bound, iter_key) + } + } + + /// Returns an iterator pointing to the element that equals to the provided key, or an end + /// iterator if the key is not found. + public(friend) fun find(self: &BigOrderedMap, key: &K): IteratorPtr { + let lower_bound = self.lower_bound(key); + if (lower_bound.iter_is_end(self)) { + lower_bound + } else if (&lower_bound.key == key) { + lower_bound + } else { + self.new_end_iter() + } + } + + /// Returns true iff the key exists in the map. + public fun contains(self: &BigOrderedMap, key: &K): bool { + let lower_bound = self.lower_bound(key); + if (lower_bound.iter_is_end(self)) { + false + } else if (&lower_bound.key == key) { + true + } else { + false + } + } + + /// Returns a reference to the element with its key, aborts if the key is not found. + public fun borrow(self: &BigOrderedMap, key: &K): &V { + let iter = self.find(key); + assert!(!iter.iter_is_end(self), error::invalid_argument(EKEY_NOT_FOUND)); + + iter.iter_borrow(self) + } + + public fun get(self: &BigOrderedMap, key: &K): Option { + let iter = self.find(key); + if (iter.iter_is_end(self)) { + option::none() + } else { + option::some(*iter.iter_borrow(self)) + } + } + + /// Returns a mutable reference to the element with its key at the given index, aborts if the key is not found. + /// Aborts with EBORROW_MUT_REQUIRES_CONSTANT_VALUE_SIZE if KV size doesn't have constant size, + /// because if it doesn't we cannot assert invariants on the size. + /// In case of variable size, use either `borrow`, `copy` then `upsert`, or `remove` and `add` instead of mutable borrow. + public fun borrow_mut(self: &mut BigOrderedMap, key: &K): &mut V { + let iter = self.find(key); + assert!(!iter.iter_is_end(self), error::invalid_argument(EKEY_NOT_FOUND)); + iter.iter_borrow_mut(self) + } + public fun borrow_front(self: &BigOrderedMap): (K, &V) { + let it = self.new_begin_iter(); + let key = *it.iter_borrow_key(); + (key, it.iter_borrow(self)) + } + + public fun borrow_back(self: &BigOrderedMap): (K, &V) { + let it = self.new_end_iter().iter_prev(self); + let key = *it.iter_borrow_key(); + (key, it.iter_borrow(self)) + } + + public fun prev_key(self: &BigOrderedMap, key: &K): Option { + let it = self.lower_bound(key); + if (it.iter_is_begin(self)) { + option::none() + } else { + option::some(*it.iter_prev(self).iter_borrow_key()) + } + } + + public fun next_key(self: &BigOrderedMap, key: &K): Option { + let it = self.lower_bound(key); + if (it.iter_is_end(self)) { + option::none() + } else { + let cur_key = it.iter_borrow_key(); + if (key == cur_key) { + let it = it.iter_next(self); + if (it.iter_is_end(self)) { + option::none() + } else { + option::some(*it.iter_borrow_key()) + } + } else { + option::some(*cur_key) + } + } + } + + // =========================== Views and Traversals ============================== + + /// Convert a BigOrderedMap to an OrderedMap, which is supposed to be called mostly by view functions to get an atomic + /// view of the whole map. 
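    /// A hedged sketch of the intended call site, a #[view] wrapper (the `Store` resource, the address
    /// and the type arguments are assumptions, not part of this diff):
    ///     #[view]
    ///     public fun dump(): OrderedMap<u64, u64> acquires Store {
    ///         borrow_global<Store>(@0x1).map.to_ordered_map()
    ///     }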
+    /// Disclaimer: This function may be costly as the BigOrderedMap may be huge in size. Use it at your own discretion.
+    public fun to_ordered_map(self: &BigOrderedMap): OrderedMap {
+        let result = ordered_map::new();
+        self.for_each_ref_friend(|k, v| {
+            result.new_end_iter().iter_add(&mut result, *k, *v);
+        });
+        result
+    }
+
+    /// Get all keys.
+    ///
+    /// For a large enough BigOrderedMap this function will fail due to execution gas limits;
+    /// use an iterator or next_key/prev_key to iterate across portions of the map.
+    public fun keys(self: &BigOrderedMap): vector {
+        let result = vector[];
+        self.for_each_ref_friend(|k, _v| {
+            result.push_back(*k);
+        });
+        result
+    }
+
+    /// Apply the function to each element in the map, consuming it, leaving the map empty.
+    ///
+    /// Current implementation is O(n * log(n)). After function values will be optimized
+    /// to O(n).
+    public inline fun for_each_and_clear(self: &mut BigOrderedMap, f: |K, V|) {
+        // TODO - this can be done more efficiently, by destroying the leaves directly
+        // but that requires more complicated code and testing.
+        while (!self.is_empty()) {
+            let (k, v) = self.pop_front();
+            f(k, v);
+        };
+    }
+
+    /// Apply the function to each element in the map, consuming it, and consuming the map
+    ///
+    /// Current implementation is O(n * log(n)). After function values will be optimized
+    /// to O(n).
+    public inline fun for_each(self: BigOrderedMap, f: |K, V|) {
+        // TODO - this can be done more efficiently, by destroying the leaves directly
+        // but that requires more complicated code and testing.
+        self.for_each_and_clear(|k, v| f(k, v));
+        self.destroy_empty()
+    }
+
+    /// Apply the function to a reference of each element in the map.
+    ///
+    /// Current implementation is O(n * log(n)). After function values will be optimized
+    /// to O(n).
+    public inline fun for_each_ref(self: &BigOrderedMap, f: |&K, &V|) {
+        // This implementation is inefficient: O(log(n)) for next_key / borrow lookups every time,
+        // but is the only one available through the public API.
+        if (!self.is_empty()) {
+            let (k, v) = self.borrow_front();
+            f(&k, v);
+
+            let cur_k = self.next_key(&k);
+            while (cur_k.is_some()) {
+                let k = cur_k.destroy_some();
+                f(&k, self.borrow(&k));
+
+                cur_k = self.next_key(&k);
+            };
+        };
+
+        // TODO use this more efficient implementation when function values are enabled.
+        // self.for_each_leaf_node_ref(|node| {
+        //     node.children.for_each_ref(|k: &K, v: &Child| {
+        //         f(k, &v.value);
+        //     });
+        // })
+    }
+
+    // TODO: Temporary friend implementation, until for_each_ref can be made efficient.
+    public(friend) inline fun for_each_ref_friend(self: &BigOrderedMap, f: |&K, &V|) {
+        self.for_each_leaf_node_ref(|node| {
+            node.children.for_each_ref_friend(|k: &K, v: &Child| {
+                f(k, &v.value);
+            });
+        })
+    }
+
+    /// Apply the function to a mutable reference of each key-value pair in the map.
+    ///
+    /// Current implementation is O(n * log(n)). After function values will be optimized
+    /// to O(n).
+    public inline fun for_each_mut(self: &mut BigOrderedMap, f: |&K, &mut V|) {
+        // This implementation is inefficient: O(log(n)) for next_key / borrow lookups every time,
+        // but is the only one available through the public API.
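        // A hedged usage sketch (element types assumed): bump every value in place:
        //     map.for_each_mut(|_k, v| { *v = *v + 1; });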
+ if (!self.is_empty()) { + let (k, _v) = self.borrow_front(); + + let done = false; + while (!done) { + f(&k, self.borrow_mut(&k)); + + let cur_k = self.next_key(&k); + if (cur_k.is_some()) { + k = cur_k.destroy_some(); + } else { + done = true; + } + }; + }; + + // TODO: if we make iterator api public update to: + // let iter = self.new_begin_iter(); + // while (!iter.iter_is_end(self)) { + // let key = *iter.iter_borrow_key(self); + // f(key, iter.iter_borrow_mut(self)); + // iter = iter.iter_next(self); + // } + } + + /// Destroy a map, by destroying elements individually. + /// + /// Current implementation is O(n * log(n)). After function values will be optimized + /// to O(n). + public inline fun destroy(self: BigOrderedMap, dv: |V|) { + self.for_each(|_k, v| { + dv(v); + }); + } + + // ========================= IteratorPtr functions =========================== + + /// Returns the begin iterator. + public(friend) fun new_begin_iter(self: &BigOrderedMap): IteratorPtr { + if (self.is_empty()) { + return IteratorPtr::End; + }; + + let node = self.borrow_node(self.min_leaf_index); + assert!(!node.children.is_empty(), error::invalid_state(EINTERNAL_INVARIANT_BROKEN)); + let begin_child_iter = node.children.new_begin_iter(); + let begin_child_key = *begin_child_iter.iter_borrow_key(&node.children); + new_iter(self.min_leaf_index, begin_child_iter, begin_child_key) + } + + /// Returns the end iterator. + public(friend) fun new_end_iter(self: &BigOrderedMap): IteratorPtr { + IteratorPtr::End + } + + // Returns true iff the iterator is a begin iterator. + public(friend) fun iter_is_begin(self: &IteratorPtr, map: &BigOrderedMap): bool { + if (self is IteratorPtr::End) { + map.is_empty() + } else { + (self.node_index == map.min_leaf_index && self.child_iter.iter_is_begin_from_non_empty()) + } + } + + // Returns true iff the iterator is an end iterator. + public(friend) fun iter_is_end(self: &IteratorPtr, _map: &BigOrderedMap): bool { + self is IteratorPtr::End + } + + /// Borrows the key given iterator points to. + /// Aborts with EITER_OUT_OF_BOUNDS if iterator is pointing to the end. + /// Note: Requires that the map is not changed after the input iterator is generated. + public(friend) fun iter_borrow_key(self: &IteratorPtr): &K { + assert!(!(self is IteratorPtr::End), error::invalid_argument(EITER_OUT_OF_BOUNDS)); + &self.key + } + + /// Borrows the value given iterator points to. + /// Aborts with EITER_OUT_OF_BOUNDS if iterator is pointing to the end. + /// Note: Requires that the map is not changed after the input iterator is generated. + public(friend) fun iter_borrow(self: IteratorPtr, map: &BigOrderedMap): &V { + assert!(!self.iter_is_end(map), error::invalid_argument(EITER_OUT_OF_BOUNDS)); + let IteratorPtr::Some { node_index, child_iter, key: _ } = self; + let children = &map.borrow_node(node_index).children; + &child_iter.iter_borrow(children).value + } + + /// Mutably borrows the value iterator points to. + /// Aborts with EITER_OUT_OF_BOUNDS if iterator is pointing to the end. + /// Aborts with EBORROW_MUT_REQUIRES_CONSTANT_VALUE_SIZE if KV size doesn't have constant size, + /// because if it doesn't we cannot assert invariants on the size. + /// In case of variable size, use either `borrow`, `copy` then `upsert`, or `remove` and `add` instead of mutable borrow. + /// + /// Note: Requires that the map is not changed after the input iterator is generated. 
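    /// A hedged sketch of the suggested alternative for variable-sized values (`recompute` is a
    /// hypothetical helper, not part of this diff):
    ///     let old_value = map.remove(&key);
    ///     map.add(key, recompute(old_value));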
+ public(friend) fun iter_borrow_mut(self: IteratorPtr, map: &mut BigOrderedMap): &mut V { + assert!(map.constant_kv_size || bcs::constant_serialized_size().is_some(), error::invalid_argument(EBORROW_MUT_REQUIRES_CONSTANT_VALUE_SIZE)); + assert!(!self.iter_is_end(map), error::invalid_argument(EITER_OUT_OF_BOUNDS)); + let IteratorPtr::Some { node_index, child_iter, key: _ } = self; + let children = &mut map.borrow_node_mut(node_index).children; + &mut child_iter.iter_borrow_mut(children).value + } + + /// Returns the next iterator. + /// Aborts with EITER_OUT_OF_BOUNDS if iterator is pointing to the end. + /// Requires the map is not changed after the input iterator is generated. + public(friend) fun iter_next(self: IteratorPtr, map: &BigOrderedMap): IteratorPtr { + assert!(!(self is IteratorPtr::End), error::invalid_argument(EITER_OUT_OF_BOUNDS)); + + let node_index = self.node_index; + let node = map.borrow_node(node_index); + + let child_iter = self.child_iter.iter_next(&node.children); + if (!child_iter.iter_is_end(&node.children)) { + // next is in the same leaf node + let iter_key = *child_iter.iter_borrow_key(&node.children); + return new_iter(node_index, child_iter, iter_key); + }; + + // next is in a different leaf node + let next_index = node.next; + if (next_index != NULL_INDEX) { + let next_node = map.borrow_node(next_index); + + let child_iter = next_node.children.new_begin_iter(); + assert!(!child_iter.iter_is_end(&next_node.children), error::invalid_state(EINTERNAL_INVARIANT_BROKEN)); + let iter_key = *child_iter.iter_borrow_key(&next_node.children); + return new_iter(next_index, child_iter, iter_key); + }; + + map.new_end_iter() + } + + /// Returns the previous iterator. + /// Aborts with EITER_OUT_OF_BOUNDS if iterator is pointing to the beginning. + /// Requires the map is not changed after the input iterator is generated. + public(friend) fun iter_prev(self: IteratorPtr, map: &BigOrderedMap): IteratorPtr { + let prev_index = if (self is IteratorPtr::End) { + map.max_leaf_index + } else { + let node_index = self.node_index; + let node = map.borrow_node(node_index); + + if (!self.child_iter.iter_is_begin(&node.children)) { + // next is in the same leaf node + let child_iter = self.child_iter.iter_prev(&node.children); + let key = *child_iter.iter_borrow_key(&node.children); + return new_iter(node_index, child_iter, key); + }; + node.prev + }; + + assert!(prev_index != NULL_INDEX, error::invalid_argument(EITER_OUT_OF_BOUNDS)); + + // next is in a different leaf node + let prev_node = map.borrow_node(prev_index); + + let prev_children = &prev_node.children; + let child_iter = prev_children.new_end_iter().iter_prev(prev_children); + let iter_key = *child_iter.iter_borrow_key(prev_children); + new_iter(prev_index, child_iter, iter_key) + } + + // ====================== Internal Implementations ======================== + + inline fun for_each_leaf_node_ref(self: &BigOrderedMap, f: |&Node|) { + let cur_node_index = self.min_leaf_index; + + while (cur_node_index != NULL_INDEX) { + let node = self.borrow_node(cur_node_index); + f(node); + cur_node_index = node.next; + } + } + + /// Borrow a node, given an index. Works for both root (i.e. inline) node and separately stored nodes + inline fun borrow_node(self: &BigOrderedMap, node_index: u64): &Node { + if (node_index == ROOT_INDEX) { + &self.root + } else { + self.nodes.borrow(node_index) + } + } + + /// Borrow a node mutably, given an index. Works for both root (i.e. 
inline) node and separately stored nodes + inline fun borrow_node_mut(self: &mut BigOrderedMap, node_index: u64): &mut Node { + if (node_index == ROOT_INDEX) { + &mut self.root + } else { + self.nodes.borrow_mut(node_index) + } + } + + fun add_or_upsert_impl(self: &mut BigOrderedMap, key: K, value: V, allow_overwrite: bool): Option> { + if (!self.constant_kv_size) { + self.validate_dynamic_size_and_init_max_degrees(&key, &value); + }; + + // Optimize case where only root node exists + // (optimizes out borrowing and path creation in `find_leaf_path`) + if (self.root.is_leaf) { + let children = &mut self.root.children; + let degree = children.length(); + + if (degree < (self.leaf_max_degree as u64)) { + let result = children.upsert(key, new_leaf_child(value)); + assert!(allow_overwrite || result.is_none(), error::invalid_argument(EKEY_ALREADY_EXISTS)); + return result; + }; + }; + + let path_to_leaf = self.find_leaf_path(&key); + + if (path_to_leaf.is_empty()) { + // In this case, the key is greater than all keys in the map. + // So we need to update `key` in the pointers to the last (rightmost) child + // on every level, to maintain the invariant of `add_at` + // we also create a path_to_leaf to the rightmost leaf. + let current = ROOT_INDEX; + + loop { + path_to_leaf.push_back(current); + + let current_node = self.borrow_node_mut(current); + if (current_node.is_leaf) { + break; + }; + let last_value = current_node.children.new_end_iter().iter_prev(¤t_node.children).iter_remove(&mut current_node.children); + current = last_value.node_index.stored_to_index(); + current_node.children.add(key, last_value); + }; + }; + + self.add_at(path_to_leaf, key, new_leaf_child(value), allow_overwrite) + } + + fun validate_dynamic_size_and_init_max_degrees(self: &mut BigOrderedMap, key: &K, value: &V) { + let key_size = bcs::serialized_size(key); + let value_size = bcs::serialized_size(value); + self.validate_size_and_init_max_degrees(key_size, value_size) + } + + fun validate_static_size_and_init_max_degrees(self: &mut BigOrderedMap) { + let key_size = bcs::constant_serialized_size(); + let value_size = bcs::constant_serialized_size(); + + if (key_size.is_some()) { + let key_size = key_size.destroy_some(); + if (self.inner_max_degree == 0) { + self.inner_max_degree = max(min(MAX_DEGREE, DEFAULT_TARGET_NODE_SIZE / key_size), INNER_MIN_DEGREE as u64) as u16; + }; + assert!(key_size * (self.inner_max_degree as u64) <= MAX_NODE_BYTES, error::invalid_argument(EKEY_BYTES_TOO_LARGE)); + + if (value_size.is_some()) { + let value_size = value_size.destroy_some(); + let entry_size = key_size + value_size; + + if (self.leaf_max_degree == 0) { + self.leaf_max_degree = max(min(MAX_DEGREE, DEFAULT_TARGET_NODE_SIZE / entry_size), LEAF_MIN_DEGREE as u64) as u16; + }; + assert!(entry_size * (self.leaf_max_degree as u64) <= MAX_NODE_BYTES, error::invalid_argument(EARGUMENT_BYTES_TOO_LARGE)); + + self.constant_kv_size = true; + }; + } + } + + fun validate_size_and_init_max_degrees(self: &mut BigOrderedMap, key_size: u64, value_size: u64) { + let entry_size = key_size + value_size; + + if (self.inner_max_degree == 0) { + let default_max_degree = min(MAX_DEGREE, MAX_NODE_BYTES / DEFAULT_MAX_KEY_OR_VALUE_SIZE); + self.inner_max_degree = max(min(default_max_degree, DEFAULT_TARGET_NODE_SIZE / key_size), INNER_MIN_DEGREE as u64) as u16; + }; + + if (self.leaf_max_degree == 0) { + let default_max_degree = min(MAX_DEGREE, MAX_NODE_BYTES / DEFAULT_MAX_KEY_OR_VALUE_SIZE / 2); + self.leaf_max_degree = max(min(default_max_degree, 
DEFAULT_TARGET_NODE_SIZE / entry_size), LEAF_MIN_DEGREE as u64) as u16; + }; + + // Make sure that no nodes can exceed the upper size limit. + assert!(key_size * (self.inner_max_degree as u64) <= MAX_NODE_BYTES, error::invalid_argument(EKEY_BYTES_TOO_LARGE)); + assert!(entry_size * (self.leaf_max_degree as u64) <= MAX_NODE_BYTES, error::invalid_argument(EARGUMENT_BYTES_TOO_LARGE)); + } + + fun destroy_inner_child(self: Child): StoredSlot { + let Child::Inner { + node_index, + } = self; + + node_index + } + + fun destroy_empty_node(self: Node) { + let Node::V1 { children, is_leaf: _, prev: _, next: _ } = self; + assert!(children.is_empty(), error::invalid_argument(EMAP_NOT_EMPTY)); + children.destroy_empty(); + } + + fun new_node(is_leaf: bool): Node { + Node::V1 { + is_leaf: is_leaf, + children: ordered_map::new(), + prev: NULL_INDEX, + next: NULL_INDEX, + } + } + + fun new_node_with_children(is_leaf: bool, children: OrderedMap>): Node { + Node::V1 { + is_leaf: is_leaf, + children: children, + prev: NULL_INDEX, + next: NULL_INDEX, + } + } + + fun new_inner_child(node_index: StoredSlot): Child { + Child::Inner { + node_index: node_index, + } + } + + fun new_leaf_child(value: V): Child { + Child::Leaf { + value: value, + } + } + + fun new_iter(node_index: u64, child_iter: ordered_map::IteratorPtr, key: K): IteratorPtr { + IteratorPtr::Some { + node_index: node_index, + child_iter: child_iter, + key: key, + } + } + + /// Find leaf where the given key would fall in. + /// So the largest leaf with its `max_key <= key`. + /// return NULL_INDEX if `key` is larger than any key currently stored in the map. + fun find_leaf(self: &BigOrderedMap, key: &K): u64 { + let current = ROOT_INDEX; + loop { + let node = self.borrow_node(current); + if (node.is_leaf) { + return current; + }; + let children = &node.children; + let child_iter = children.lower_bound(key); + if (child_iter.iter_is_end(children)) { + return NULL_INDEX; + } else { + current = child_iter.iter_borrow(children).node_index.stored_to_index(); + }; + } + } + + /// Find leaf where the given key would fall in. + /// So the largest leaf with it's `max_key <= key`. + /// Returns the path from root to that leaf (including the leaf itself) + /// Returns empty path if `key` is larger than any key currently stored in the map. 
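    /// Illustrative example (hypothetical node indices): in a two-level tree whose root points at
    /// leaves stored in slots 7 and 8, a key routed to slot 8 yields the path [ROOT_INDEX, 8], while a
    /// key greater than every stored key yields the empty vector.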
+ fun find_leaf_path(self: &BigOrderedMap, key: &K): vector { + let vec = vector::empty(); + + let current = ROOT_INDEX; + loop { + vec.push_back(current); + + let node = self.borrow_node(current); + if (node.is_leaf) { + return vec; + }; + let children = &node.children; + let child_iter = children.lower_bound(key); + if (child_iter.iter_is_end(children)) { + return vector::empty(); + } else { + current = child_iter.iter_borrow(children).node_index.stored_to_index(); + }; + } + } + + fun get_max_degree(self: &BigOrderedMap, leaf: bool): u64 { + if (leaf) { + self.leaf_max_degree as u64 + } else { + self.inner_max_degree as u64 + } + } + + fun replace_root(self: &mut BigOrderedMap, new_root: Node): Node { + // TODO: once mem::replace is made public/released, update to: + // mem::replace(&mut self.root, new_root_node) + + let root = &mut self.root; + let tmp_is_leaf = root.is_leaf; + root.is_leaf = new_root.is_leaf; + new_root.is_leaf = tmp_is_leaf; + + assert!(root.prev == NULL_INDEX, error::invalid_state(EINTERNAL_INVARIANT_BROKEN)); + assert!(root.next == NULL_INDEX, error::invalid_state(EINTERNAL_INVARIANT_BROKEN)); + assert!(new_root.prev == NULL_INDEX, error::invalid_state(EINTERNAL_INVARIANT_BROKEN)); + assert!(new_root.next == NULL_INDEX, error::invalid_state(EINTERNAL_INVARIANT_BROKEN)); + + // let tmp_prev = root.prev; + // root.prev = new_root.prev; + // new_root.prev = tmp_prev; + + // let tmp_next = root.next; + // root.next = new_root.next; + // new_root.next = tmp_next; + + let tmp_children = root.children.trim(0); + root.children.append_disjoint(new_root.children.trim(0)); + new_root.children.append_disjoint(tmp_children); + + new_root + } + + /// Add a given child to a given node (last in the `path_to_node`), and update/rebalance the tree as necessary. + /// It is required that `key` pointers to the child node, on the `path_to_node` are greater or equal to the given key. + /// That means if we are adding a `key` larger than any currently existing in the map - we needed + /// to update `key` pointers on the `path_to_node` to include it, before calling this method. + /// + /// Returns Child previously associated with the given key. + /// If `allow_overwrite` is not set, function will abort if `key` is already present. + fun add_at(self: &mut BigOrderedMap, path_to_node: vector, key: K, child: Child, allow_overwrite: bool): Option> { + // Last node in the path is one where we need to add the child to. + let node_index = path_to_node.pop_back(); + { + // First check if we can perform this operation, without changing structure of the tree (i.e. without adding any nodes). + + // For that we can just borrow the single node + let node = self.borrow_node_mut(node_index); + let children = &mut node.children; + let degree = children.length(); + + // Compute directly, as we cannot use get_max_degree(), as self is already mutably borrowed. + let max_degree = if (node.is_leaf) { + self.leaf_max_degree as u64 + } else { + self.inner_max_degree as u64 + }; + + if (degree < max_degree) { + // Adding a child to a current node doesn't exceed the size, so we can just do that. 
+                let old_child = children.upsert(key, child);
+
+                if (node.is_leaf) {
+                    assert!(allow_overwrite || old_child.is_none(), error::invalid_argument(EKEY_ALREADY_EXISTS));
+                    return old_child;
+                } else {
+                    assert!(!allow_overwrite && old_child.is_none(), error::invalid_state(EINTERNAL_INVARIANT_BROKEN));
+                    return old_child;
+                };
+            };
+
+            // If we cannot add more nodes without exceeding the size,
+            // but node with `key` already exists, we either need to replace or abort.
+            let iter = children.find(&key);
+            if (!iter.iter_is_end(children)) {
+                assert!(node.is_leaf, error::invalid_state(EINTERNAL_INVARIANT_BROKEN));
+                assert!(allow_overwrite, error::invalid_argument(EKEY_ALREADY_EXISTS));
+
+                return option::some(iter.iter_replace(children, child));
+            }
+        };
+
+        // # of children in the current node exceeds the threshold, need to split into two nodes.
+
+        // If we are at the root, we need to move root node to become a child and have a new root node,
+        // in order to be able to split the node on the level it is.
+        let (reserved_slot, node) = if (node_index == ROOT_INDEX) {
+            assert!(path_to_node.is_empty(), error::invalid_state(EINTERNAL_INVARIANT_BROKEN));
+
+            // Splitting root now, need to create a new root.
+            // Since root is stored directly in the resource, we will swap-in the new node there.
+            let new_root_node = new_node(/*is_leaf=*/false);
+
+            // Reserve a slot where the current root will be moved to.
+            let (replacement_node_slot, replacement_node_reserved_slot) = self.nodes.reserve_slot();
+
+            let max_key = {
+                let root_children = &self.root.children;
+                let max_key = *root_children.new_end_iter().iter_prev(root_children).iter_borrow_key(root_children);
+                // need to check if key is largest, as invariant is that "parent's pointers" have been updated,
+                // but key itself can be larger than all previous ones.
+                if (cmp::compare(&max_key, &key).is_lt()) {
+                    max_key = key;
+                };
+                max_key
+            };
+            // New root will start with a single child - the existing root (which will be at the replacement location).
+            new_root_node.children.add(max_key, new_inner_child(replacement_node_slot));
+            let node = self.replace_root(new_root_node);
+
+            // we moved the currently processing node one level down, so we need to update the path
+            path_to_node.push_back(ROOT_INDEX);
+
+            let replacement_index = replacement_node_reserved_slot.reserved_to_index();
+            if (node.is_leaf) {
+                // replacement node is the only leaf, so we update the pointers:
+                self.min_leaf_index = replacement_index;
+                self.max_leaf_index = replacement_index;
+            };
+            (replacement_node_reserved_slot, node)
+        } else {
+            // In order to work on multiple nodes at the same time, we cannot borrow_mut, and need to
+            // remove_and_reserve the existing node.
+            let (cur_node_reserved_slot, node) = self.nodes.remove_and_reserve(node_index);
+            (cur_node_reserved_slot, node)
+        };
+
+        // move node_index out of scope, to make sure we don't accidentally access it, as we are done with it.
+        // (i.e. we should be using `reserved_slot` instead).
+        move node_index;
+
+        // Now we can perform the split at the current level, as we know we are not at the root level.
+        assert!(!path_to_node.is_empty(), error::invalid_state(EINTERNAL_INVARIANT_BROKEN));
+
+        // Parent has a reference under max key to the current node, so the existing index
+        // needs to be the right node.
+        // Since ordered_map::trim moves from the end (i.e. smaller keys stay),
+        // we are going to put the contents of the current node on the left side,
+        // and create a new right node.
+ // So if we had before (node_index, node), we will change that to end up having: + // (new_left_node_index, node trimmed off) and (node_index, new node with trimmed off children) + // + // So let's rename variables cleanly: + let right_node_reserved_slot = reserved_slot; + let left_node = node; + + let is_leaf = left_node.is_leaf; + let left_children = &mut left_node.children; + + let right_node_index = right_node_reserved_slot.reserved_to_index(); + let left_next = &mut left_node.next; + let left_prev = &mut left_node.prev; + + // Compute directly, as we cannot use get_max_degree(), as self is already mutably borrowed. + let max_degree = if (is_leaf) { + self.leaf_max_degree as u64 + } else { + self.inner_max_degree as u64 + }; + // compute the target size for the left node: + let target_size = (max_degree + 1) / 2; + + // Add child (which will exceed the size), and then trim off to create two sets of children of correct sizes. + left_children.add(key, child); + let right_node_children = left_children.trim(target_size); + + assert!(left_children.length() <= max_degree, error::invalid_state(EINTERNAL_INVARIANT_BROKEN)); + assert!(right_node_children.length() <= max_degree, error::invalid_state(EINTERNAL_INVARIANT_BROKEN)); + + let right_node = new_node_with_children(is_leaf, right_node_children); + + let (left_node_slot, left_node_reserved_slot) = self.nodes.reserve_slot(); + let left_node_index = left_node_slot.stored_to_index(); + + // right nodes next is the node that was next of the left (previous) node, and next of left node is the right node. + right_node.next = *left_next; + *left_next = right_node_index; + + // right node's prev becomes current left node + right_node.prev = left_node_index; + // Since the previously used index is going to the right node, `prev` pointer of the next node is correct, + // and we need to update next pointer of the previous node (if exists) + if (*left_prev != NULL_INDEX) { + self.nodes.borrow_mut(*left_prev).next = left_node_index; + assert!(right_node_index != self.min_leaf_index, error::invalid_state(EINTERNAL_INVARIANT_BROKEN)); + } else if (right_node_index == self.min_leaf_index) { + // Otherwise, if we were the smallest node on the level. if this is the leaf level, update the pointer. + assert!(is_leaf, error::invalid_state(EINTERNAL_INVARIANT_BROKEN)); + self.min_leaf_index = left_node_index; + }; + + // Largest left key is the split key. + let max_left_key = *left_children.new_end_iter().iter_prev(left_children).iter_borrow_key(left_children); + + self.nodes.fill_reserved_slot(left_node_reserved_slot, left_node); + self.nodes.fill_reserved_slot(right_node_reserved_slot, right_node); + + // Add new Child (i.e. pointer to the left node) in the parent. + self.add_at(path_to_node, max_left_key, new_inner_child(left_node_slot), false).destroy_none(); + option::none() + } + + /// Given a path to node (excluding the node itself), which is currently stored under "old_key", update "old_key" to "new_key". + fun update_key(self: &mut BigOrderedMap, path_to_node: vector, old_key: &K, new_key: K) { + while (!path_to_node.is_empty()) { + let node_index = path_to_node.pop_back(); + let node = self.borrow_node_mut(node_index); + let children = &mut node.children; + children.replace_key_inplace(old_key, new_key); + + // If we were not updating the largest child, we don't need to continue. 
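            // Hedged illustration (keys assumed): if a leaf's max key drops from 17 to 12, the parent entry
            // keyed 17 is re-keyed to 12; if 17 was also the parent's own max key, the same re-keying
            // continues one more level up on the next loop iteration.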
+ if (children.new_end_iter().iter_prev(children).iter_borrow_key(children) != &new_key) { + return + }; + } + } + + fun remove_at(self: &mut BigOrderedMap, path_to_node: vector, key: &K): Child { + // Last node in the path is one where we need to remove the child from. + let node_index = path_to_node.pop_back(); + let old_child = { + // First check if we can perform this operation, without changing structure of the tree (i.e. without rebalancing any nodes). + + // For that we can just borrow the single node + let node = self.borrow_node_mut(node_index); + + let children = &mut node.children; + let is_leaf = node.is_leaf; + + let old_child = children.remove(key); + if (node_index == ROOT_INDEX) { + // If current node is root, lower limit of max_degree/2 nodes doesn't apply. + // So we can adjust internally + + assert!(path_to_node.is_empty(), error::invalid_state(EINTERNAL_INVARIANT_BROKEN)); + + if (!is_leaf && children.length() == 1) { + // If root is not leaf, but has a single child, promote only child to root, + // and drop current root. Since root is stored directly in the resource, we + // "move" the child into the root. + + let Child::Inner { + node_index: inner_child_index, + } = children.new_end_iter().iter_prev(children).iter_remove(children); + + let inner_child = self.nodes.remove(inner_child_index); + if (inner_child.is_leaf) { + self.min_leaf_index = ROOT_INDEX; + self.max_leaf_index = ROOT_INDEX; + }; + + self.replace_root(inner_child).destroy_empty_node(); + }; + return old_child; + }; + + // Compute directly, as we cannot use get_max_degree(), as self is already mutably borrowed. + let max_degree = if (is_leaf) { + self.leaf_max_degree as u64 + } else { + self.inner_max_degree as u64 + }; + let degree = children.length(); + + // See if the node is big enough, or we need to merge it with another node on this level. + let big_enough = degree * 2 >= max_degree; + + let new_max_key = *children.new_end_iter().iter_prev(children).iter_borrow_key(children); + + // See if max key was updated for the current node, and if so - update it on the path. + let max_key_updated = cmp::compare(&new_max_key, key).is_lt(); + if (max_key_updated) { + assert!(degree >= 1, error::invalid_state(EINTERNAL_INVARIANT_BROKEN)); + + self.update_key(path_to_node, key, new_max_key); + }; + + // If node is big enough after removal, we are done. + if (big_enough) { + return old_child; + }; + + old_child + }; + + // Children size is below threshold, we need to rebalance with a neighbor on the same level. + + // In order to work on multiple nodes at the same time, we cannot borrow_mut, and need to be + // remove_and_reserve existing node. + let (node_slot, node) = self.nodes.remove_and_reserve(node_index); + + let is_leaf = node.is_leaf; + let max_degree = self.get_max_degree(is_leaf); + let prev = node.prev; + let next = node.next; + + // index of the node we will rebalance with. + let sibling_index = { + let parent_children = &self.borrow_node(*path_to_node.borrow(path_to_node.length() - 1)).children; + assert!(parent_children.length() >= 2, error::invalid_state(EINTERNAL_INVARIANT_BROKEN)); + // If we are the largest node from the parent, we merge with the `prev` + // (which is then guaranteed to have the same parent, as any node has >1 children), + // otherwise we merge with `next`. 
+ if (parent_children.new_end_iter().iter_prev(parent_children).iter_borrow(parent_children).node_index.stored_to_index() == node_index) { + prev + } else { + next + } + }; + + let children = &mut node.children; + + let (sibling_slot, sibling_node) = self.nodes.remove_and_reserve(sibling_index); + assert!(is_leaf == sibling_node.is_leaf, error::invalid_state(EINTERNAL_INVARIANT_BROKEN)); + let sibling_children = &mut sibling_node.children; + + if ((sibling_children.length() - 1) * 2 >= max_degree) { + // The sibling node has enough elements, we can just borrow an element from the sibling node. + if (sibling_index == next) { + // if sibling is the node with larger keys, we remove a child from the start + let old_max_key = *children.new_end_iter().iter_prev(children).iter_borrow_key(children); + let sibling_begin_iter = sibling_children.new_begin_iter(); + let borrowed_max_key = *sibling_begin_iter.iter_borrow_key(sibling_children); + let borrowed_element = sibling_begin_iter.iter_remove(sibling_children); + + children.new_end_iter().iter_add(children, borrowed_max_key, borrowed_element); + + // max_key of the current node changed, so update + self.update_key(path_to_node, &old_max_key, borrowed_max_key); + } else { + // if sibling is the node with smaller keys, we remove a child from the end + let sibling_end_iter = sibling_children.new_end_iter().iter_prev(sibling_children); + let borrowed_max_key = *sibling_end_iter.iter_borrow_key(sibling_children); + let borrowed_element = sibling_end_iter.iter_remove(sibling_children); + + children.add(borrowed_max_key, borrowed_element); + + // max_key of the sibling node changed, so update + self.update_key(path_to_node, &borrowed_max_key, *sibling_children.new_end_iter().iter_prev(sibling_children).iter_borrow_key(sibling_children)); + }; + + self.nodes.fill_reserved_slot(node_slot, node); + self.nodes.fill_reserved_slot(sibling_slot, sibling_node); + return old_child; + }; + + // The sibling node doesn't have enough elements to borrow, merge with the sibling node. + // Keep the slot of the node with larger keys of the two, to not require updating key on the parent nodes. + // But append to the node with smaller keys, as ordered_map::append is more efficient when adding to the end. + let (key_to_remove, reserved_slot_to_remove) = if (sibling_index == next) { + // destroying larger sibling node, keeping sibling_slot. + let Node::V1 { children: sibling_children, is_leaf: _, prev: _, next: sibling_next } = sibling_node; + let key_to_remove = *children.new_end_iter().iter_prev(children).iter_borrow_key(children); + children.append_disjoint(sibling_children); + node.next = sibling_next; + + if (node.next != NULL_INDEX) { + assert!(self.nodes.borrow_mut(node.next).prev == sibling_index, error::invalid_state(EINTERNAL_INVARIANT_BROKEN)); + }; + + // we are removing node_index, which previous's node's next was pointing to, + // so update the pointer + if (node.prev != NULL_INDEX) { + self.nodes.borrow_mut(node.prev).next = sibling_index; + }; + // Otherwise, we were the smallest node on the level. if this is the leaf level, update the pointer. 
+ if (self.min_leaf_index == node_index) { + assert!(is_leaf, error::invalid_state(EINTERNAL_INVARIANT_BROKEN)); + self.min_leaf_index = sibling_index; + }; + + self.nodes.fill_reserved_slot(sibling_slot, node); + + (key_to_remove, node_slot) + } else { + // destroying larger current node, keeping node_slot + let Node::V1 { children: node_children, is_leaf: _, prev: _, next: node_next } = node; + let key_to_remove = *sibling_children.new_end_iter().iter_prev(sibling_children).iter_borrow_key(sibling_children); + sibling_children.append_disjoint(node_children); + sibling_node.next = node_next; + + if (sibling_node.next != NULL_INDEX) { + assert!(self.nodes.borrow_mut(sibling_node.next).prev == node_index, error::invalid_state(EINTERNAL_INVARIANT_BROKEN)); + }; + // we are removing sibling node_index, which previous's node's next was pointing to, + // so update the pointer + if (sibling_node.prev != NULL_INDEX) { + self.nodes.borrow_mut(sibling_node.prev).next = node_index; + }; + // Otherwise, sibling was the smallest node on the level. if this is the leaf level, update the pointer. + if (self.min_leaf_index == sibling_index) { + assert!(is_leaf, error::invalid_state(EINTERNAL_INVARIANT_BROKEN)); + self.min_leaf_index = node_index; + }; + + self.nodes.fill_reserved_slot(node_slot, sibling_node); + + (key_to_remove, sibling_slot) + }; + + assert!(!path_to_node.is_empty(), error::invalid_state(EINTERNAL_INVARIANT_BROKEN)); + let slot_to_remove = self.remove_at(path_to_node, &key_to_remove).destroy_inner_child(); + self.nodes.free_reserved_slot(reserved_slot_to_remove, slot_to_remove); + + old_child + } + + // ===== spec =========== + + spec module { + pragma verify = false; + } + + // recursive functions need to be marked opaque + + spec add_at { + pragma opaque; + } + + spec remove_at { + pragma opaque; + } + + // ============================= Tests ==================================== + + #[test_only] + fun print_map(self: &BigOrderedMap) { + // uncomment to debug: + // aptos_std::debug::print(&std::string::utf8(b"print map")); + // aptos_std::debug::print(self); + // self.print_map_for_node(ROOT_INDEX, 0); + } + + #[test_only] + fun print_map_for_node(self: &BigOrderedMap, node_index: u64, level: u64) { + let node = self.borrow_node(node_index); + + aptos_std::debug::print(&level); + aptos_std::debug::print(&node_index); + aptos_std::debug::print(node); + + if (!node.is_leaf) { + node.children.for_each_ref_friend(|_key, node| { + self.print_map_for_node(node.node_index.stored_to_index(), level + 1); + }); + }; + } + + #[test_only] + fun destroy_and_validate(self: BigOrderedMap) { + let it = self.new_begin_iter(); + while (!it.iter_is_end(&self)) { + self.remove(it.iter_borrow_key()); + assert!(self.find(it.iter_borrow_key()).iter_is_end(&self), error::invalid_state(EINTERNAL_INVARIANT_BROKEN)); + it = self.new_begin_iter(); + self.validate_map(); + }; + + self.destroy_empty(); + } + + #[test_only] + fun validate_iteration(self: &BigOrderedMap) { + let expected_num_elements = self.compute_length(); + let num_elements = 0; + let it = self.new_begin_iter(); + while (!it.iter_is_end(self)) { + num_elements += 1; + it = it.iter_next(self); + }; + + assert!(num_elements == expected_num_elements, error::invalid_state(EINTERNAL_INVARIANT_BROKEN)); + + let num_elements = 0; + let it = self.new_end_iter(); + while (!it.iter_is_begin(self)) { + it = it.iter_prev(self); + num_elements += 1; + }; + assert!(num_elements == expected_num_elements, error::invalid_state(EINTERNAL_INVARIANT_BROKEN)); + + let 
it = self.new_end_iter(); + if (!it.iter_is_begin(self)) { + it = it.iter_prev(self); + assert!(it.node_index == self.max_leaf_index, error::invalid_state(EINTERNAL_INVARIANT_BROKEN)); + } else { + assert!(expected_num_elements == 0, error::invalid_state(EINTERNAL_INVARIANT_BROKEN)); + }; + } + + #[test_only] + fun validate_subtree(self: &BigOrderedMap, node_index: u64, expected_lower_bound_key: Option, expected_max_key: Option) { + let node = self.borrow_node(node_index); + let len = node.children.length(); + assert!(len <= self.get_max_degree(node.is_leaf), error::invalid_state(EINTERNAL_INVARIANT_BROKEN)); + + if (node_index != ROOT_INDEX) { + assert!(len >= 1, error::invalid_state(EINTERNAL_INVARIANT_BROKEN)); + assert!(len * 2 >= self.get_max_degree(node.is_leaf) || node_index == ROOT_INDEX, error::invalid_state(EINTERNAL_INVARIANT_BROKEN)); + }; + + node.children.validate_ordered(); + + let previous_max_key = expected_lower_bound_key; + node.children.for_each_ref_friend(|key: &K, child: &Child| { + if (!node.is_leaf) { + self.validate_subtree(child.node_index.stored_to_index(), previous_max_key, option::some(*key)); + } else { + assert!((child is Child::Leaf), error::invalid_state(EINTERNAL_INVARIANT_BROKEN)); + }; + previous_max_key = option::some(*key); + }); + + if (expected_max_key.is_some()) { + let expected_max_key = expected_max_key.extract(); + assert!(&expected_max_key == node.children.new_end_iter().iter_prev(&node.children).iter_borrow_key(&node.children), error::invalid_state(EINTERNAL_INVARIANT_BROKEN)); + }; + + if (expected_lower_bound_key.is_some()) { + let expected_lower_bound_key = expected_lower_bound_key.extract(); + assert!(cmp::compare(&expected_lower_bound_key, node.children.new_begin_iter().iter_borrow_key(&node.children)).is_lt(), error::invalid_state(EINTERNAL_INVARIANT_BROKEN)); + }; + } + + #[test_only] + fun validate_map(self: &BigOrderedMap) { + self.validate_subtree(ROOT_INDEX, option::none(), option::none()); + self.validate_iteration(); + } + + #[test] + fun test_small_example() { + let map = new_with_config(5, 3, true); + map.allocate_spare_slots(2); + map.print_map(); map.validate_map(); + map.add(1, 1); map.print_map(); map.validate_map(); + map.add(2, 2); map.print_map(); map.validate_map(); + let r1 = map.upsert(3, 3); map.print_map(); map.validate_map(); + assert!(r1 == option::none(), 1); + map.add(4, 4); map.print_map(); map.validate_map(); + let r2 = map.upsert(4, 8); map.print_map(); map.validate_map(); + assert!(r2 == option::some(4), 2); + map.add(5, 5); map.print_map(); map.validate_map(); + map.add(6, 6); map.print_map(); map.validate_map(); + + let expected_keys = vector[1, 2, 3, 4, 5, 6]; + let expected_values = vector[1, 2, 3, 8, 5, 6]; + + let index = 0; + map.for_each_ref(|k, v| { + assert!(k == expected_keys.borrow(index), *k + 100); + assert!(v == expected_values.borrow(index), *k + 200); + index += 1; + }); + + let index = 0; + map.for_each_ref_friend(|k, v| { + assert!(k == expected_keys.borrow(index), *k + 100); + assert!(v == expected_values.borrow(index), *k + 200); + index += 1; + }); + + expected_keys.zip(expected_values, |key, value| { + assert!(map.borrow(&key) == &value, key + 300); + assert!(map.borrow_mut(&key) == &value, key + 400); + }); + + map.remove(&5); map.print_map(); map.validate_map(); + map.remove(&4); map.print_map(); map.validate_map(); + map.remove(&1); map.print_map(); map.validate_map(); + map.remove(&3); map.print_map(); map.validate_map(); + map.remove(&2); map.print_map(); map.validate_map(); + 
map.remove(&6); map.print_map(); map.validate_map(); + + map.destroy_empty(); + } + + #[test] + fun test_for_each() { + let map = new_with_config(4, 3, false); + map.add_all(vector[1, 3, 6, 2, 9, 5, 7, 4, 8], vector[1, 3, 6, 2, 9, 5, 7, 4, 8]); + + let expected = vector[1, 2, 3, 4, 5, 6, 7, 8, 9]; + let index = 0; + map.for_each(|k, v| { + assert!(k == expected[index], k + 100); + assert!(v == expected[index], k + 200); + index += 1; + }); + } + + #[test] + fun test_for_each_ref() { + let map = new_with_config(4, 3, false); + map.add_all(vector[1, 3, 6, 2, 9, 5, 7, 4, 8], vector[1, 3, 6, 2, 9, 5, 7, 4, 8]); + + let expected = vector[1, 2, 3, 4, 5, 6, 7, 8, 9]; + let index = 0; + map.for_each_ref(|k, v| { + assert!(*k == expected[index], *k + 100); + assert!(*v == expected[index], *k + 200); + index += 1; + }); + + map.destroy(|_v| {}); + } + + #[test] + fun test_for_each_variants() { + let keys = vector[1, 3, 5]; + let values = vector[10, 30, 50]; + let map = new_from(keys, values); + + let index = 0; + map.for_each_ref(|k, v| { + assert!(keys[index] == *k); + assert!(values[index] == *v); + index += 1; + }); + + let index = 0; + map.for_each_mut(|k, v| { + assert!(keys[index] == *k); + assert!(values[index] == *v); + *v += 1; + index += 1; + }); + + let index = 0; + map.for_each(|k, v| { + assert!(keys[index] == k); + assert!(values[index] + 1 == v); + index += 1; + }); + } + + #[test] + fun test_variable_size() { + let map = new_with_config, vector>(0, 0, false); + map.print_map(); map.validate_map(); + map.add(vector[1], vector[1]); map.print_map(); map.validate_map(); + map.add(vector[2], vector[2]); map.print_map(); map.validate_map(); + let r1 = map.upsert(vector[3], vector[3]); map.print_map(); map.validate_map(); + assert!(r1 == option::none(), 1); + map.add(vector[4], vector[4]); map.print_map(); map.validate_map(); + let r2 = map.upsert(vector[4], vector[8, 8, 8]); map.print_map(); map.validate_map(); + assert!(r2 == option::some(vector[4]), 2); + map.add(vector[5], vector[5]); map.print_map(); map.validate_map(); + map.add(vector[6], vector[6]); map.print_map(); map.validate_map(); + + vector[1, 2, 3, 4, 5, 6].zip(vector[1, 2, 3, 8, 5, 6], |key, value| { + assert!(map.borrow(&vector[key])[0] == value, key + 100); + }); + + map.remove(&vector[5]); map.print_map(); map.validate_map(); + map.remove(&vector[4]); map.print_map(); map.validate_map(); + map.remove(&vector[1]); map.print_map(); map.validate_map(); + map.remove(&vector[3]); map.print_map(); map.validate_map(); + map.remove(&vector[2]); map.print_map(); map.validate_map(); + map.remove(&vector[6]); map.print_map(); map.validate_map(); + + map.destroy_empty(); + } + #[test] + fun test_deleting_and_creating_nodes() { + let map = new_with_config(4, 3, true); + map.allocate_spare_slots(2); + + for (i in 0..25) { + map.upsert(i, i); + map.validate_map(); + }; + + for (i in 0..20) { + map.remove(&i); + map.validate_map(); + }; + + for (i in 25..50) { + map.upsert(i, i); + map.validate_map(); + }; + + for (i in 25..45) { + map.remove(&i); + map.validate_map(); + }; + + for (i in 50..75) { + map.upsert(i, i); + map.validate_map(); + }; + + for (i in 50..75) { + map.remove(&i); + map.validate_map(); + }; + + for (i in 20..25) { + map.remove(&i); + map.validate_map(); + }; + + for (i in 45..50) { + map.remove(&i); + map.validate_map(); + }; + + map.destroy_empty(); + } + + #[test] + fun test_iterator() { + let map = new_with_config(5, 5, true); + map.allocate_spare_slots(2); + + let data = vector[1, 7, 5, 8, 4, 2, 6, 3, 9, 0]; + 
while (data.length() != 0) { + let element = data.pop_back(); + map.add(element, element); + }; + + let it = map.new_begin_iter(); + + let i = 0; + while (!it.iter_is_end(&map)) { + assert!(i == it.key, i); + assert!(it.iter_borrow(&map) == &i, i); + assert!(it.iter_borrow_mut(&mut map) == &i, i); + i += 1; + it = it.iter_next(&map); + }; + + map.destroy(|_v| {}); + } + + #[test] + fun test_find() { + let map = new_with_config(5, 5, true); + map.allocate_spare_slots(2); + + let data = vector[11, 1, 7, 5, 8, 2, 6, 3, 0, 10]; + map.add_all(data, data); + + let i = 0; + while (i < data.length()) { + let element = data.borrow(i); + let it = map.find(element); + assert!(!it.iter_is_end(&map), i); + assert!(it.iter_borrow_key() == element, i); + i += 1; + }; + + assert!(map.find(&4).iter_is_end(&map), 0); + assert!(map.find(&9).iter_is_end(&map), 1); + + map.destroy(|_v| {}); + } + + #[test] + fun test_lower_bound() { + let map = new_with_config(5, 5, true); + map.allocate_spare_slots(2); + + let data = vector[11, 1, 7, 5, 8, 2, 6, 3, 12, 10]; + map.add_all(data, data); + + let i = 0; + while (i < data.length()) { + let element = *data.borrow(i); + let it = map.lower_bound(&element); + assert!(!it.iter_is_end(&map), i); + assert!(it.key == element, i); + i += 1; + }; + + assert!(map.lower_bound(&0).key == 1, 0); + assert!(map.lower_bound(&4).key == 5, 1); + assert!(map.lower_bound(&9).key == 10, 2); + assert!(map.lower_bound(&13).iter_is_end(&map), 3); + + map.remove(&3); + assert!(map.lower_bound(&3).key == 5, 4); + map.remove(&5); + assert!(map.lower_bound(&3).key == 6, 5); + assert!(map.lower_bound(&4).key == 6, 6); + + map.destroy(|_v| {}); + } + + #[test] + fun test_contains() { + let map = new_with_config(4, 3, false); + let data = vector[3, 1, 9, 7, 5]; + map.add_all(vector[3, 1, 9, 7, 5], vector[3, 1, 9, 7, 5]); + + data.for_each_ref(|i| assert!(map.contains(i), *i)); + + let missing = vector[0, 2, 4, 6, 8, 10]; + missing.for_each_ref(|i| assert!(!map.contains(i), *i)); + + map.destroy(|_v| {}); + } + + #[test] + fun test_non_iterator_ordering() { + let map = new_from(vector[1, 2, 3], vector[10, 20, 30]); + assert!(map.prev_key(&1).is_none(), 1); + assert!(map.next_key(&1) == option::some(2), 1); + + assert!(map.prev_key(&2) == option::some(1), 2); + assert!(map.next_key(&2) == option::some(3), 3); + + assert!(map.prev_key(&3) == option::some(2), 4); + assert!(map.next_key(&3).is_none(), 5); + + let (front_k, front_v) = map.borrow_front(); + assert!(front_k == 1, 6); + assert!(front_v == &10, 7); + + let (back_k, back_v) = map.borrow_back(); + assert!(back_k == 3, 8); + assert!(back_v == &30, 9); + + let (front_k, front_v) = map.pop_front(); + assert!(front_k == 1, 10); + assert!(front_v == 10, 11); + + let (back_k, back_v) = map.pop_back(); + assert!(back_k == 3, 12); + assert!(back_v == 30, 13); + + map.destroy(|_v| {}); + } + + #[test] + #[expected_failure(abort_code = 0x1000B, location = Self)] /// EINVALID_CONFIG_PARAMETER + fun test_inner_max_degree_too_large() { + let map = new_with_config(4097, 0, false); + map.destroy_and_validate(); + } + + #[test] + #[expected_failure(abort_code = 0x1000B, location = Self)] /// EINVALID_CONFIG_PARAMETER + fun test_inner_max_degree_too_small() { + let map = new_with_config(3, 0, false); + map.destroy_and_validate(); + } + + #[test] + #[expected_failure(abort_code = 0x1000B, location = Self)] /// EINVALID_CONFIG_PARAMETER + fun test_leaf_max_degree_too_small() { + let map = new_with_config(0, 2, false); + map.destroy_and_validate(); + } + + 
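+    // Illustrative sketch (hypothetical test, added for documentation; the name and constants are
+    // assumptions, not part of the original suite): it exercises the non-root invariant used by
+    // remove_at, namely that a node keeps degree * 2 >= max_degree children and otherwise borrows
+    // from or merges with a sibling on the same level.
+    #[test]
+    fun test_remove_triggers_rebalancing_sketch() {
+        // Small degrees so a handful of keys already produce a multi-level tree.
+        let map = new_with_config(4, 4, false);
+        for (i in 0..16) {
+            map.add(i, i);
+            map.validate_map();
+        };
+        // Removing keys one by one pushes nodes below the max_degree / 2 threshold,
+        // forcing borrow-from-sibling and merge operations while keeping the map valid.
+        for (i in 0..16) {
+            map.remove(&i);
+            map.validate_map();
+        };
+        map.destroy_empty();
+    }
+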
#[test] + #[expected_failure(abort_code = 0x10001, location = Self)] /// EKEY_ALREADY_EXISTS + fun test_abort_add_existing_value() { + let map = new_from(vector[1], vector[1]); + map.add(1, 2); + map.destroy_and_validate(); + } + + #[test_only] + fun vector_range(from: u64, to: u64): vector { + let result = vector[]; + for (i in from..to) { + result.push_back(i); + }; + result + } + + #[test_only] + fun vector_bytes_range(from: u64, to: u64): vector { + let result = vector[]; + for (i in from..to) { + result.push_back((i % 128) as u8); + }; + result + } + + #[test] + #[expected_failure(abort_code = 0x10001, location = Self)] /// EKEY_ALREADY_EXISTS + fun test_abort_add_existing_value_to_non_leaf() { + let map = new_with_config(4, 4, false); + map.add_all(vector_range(1, 10), vector_range(1, 10)); + map.add(3, 3); + map.destroy_and_validate(); + } + + #[test] + #[expected_failure(abort_code = 0x10002, location = aptos_std::ordered_map)] /// EKEY_NOT_FOUND + fun test_abort_remove_missing_value() { + let map = new_from(vector[1], vector[1]); + map.remove(&2); + map.destroy_and_validate(); + } + + #[test] + #[expected_failure(abort_code = 0x10002, location = aptos_std::ordered_map)] /// EKEY_NOT_FOUND + fun test_abort_remove_missing_value_to_non_leaf() { + let map = new_with_config(4, 4, false); + map.add_all(vector_range(1, 10), vector_range(1, 10)); + map.remove(&4); + map.remove(&4); + map.destroy_and_validate(); + } + + #[test] + #[expected_failure(abort_code = 0x10002, location = Self)] /// EKEY_NOT_FOUND + fun test_abort_remove_largest_missing_value_to_non_leaf() { + let map = new_with_config(4, 4, false); + map.add_all(vector_range(1, 10), vector_range(1, 10)); + map.remove(&11); + map.destroy_and_validate(); + } + + #[test] + #[expected_failure(abort_code = 0x10002, location = Self)] /// EKEY_NOT_FOUND + fun test_abort_borrow_missing() { + let map = new_from(vector[1], vector[1]); + map.borrow(&2); + map.destroy_and_validate(); + } + + #[test] + #[expected_failure(abort_code = 0x10002, location = Self)] /// EKEY_NOT_FOUND + fun test_abort_borrow_mut_missing() { + let map = new_from(vector[1], vector[1]); + map.borrow_mut(&2); + map.destroy_and_validate(); + } + + #[test] + #[expected_failure(abort_code = 0x1000E, location = Self)] /// EBORROW_MUT_REQUIRES_CONSTANT_VALUE_SIZE + fun test_abort_borrow_mut_requires_constant_value_size() { + let map = new_with_config(0, 0, false); + map.add(1, vector[1]); + map.borrow_mut(&1); + map.destroy_and_validate(); + } + + #[test] + fun test_borrow_mut_allows_variable_key_size() { + let map = new_with_config(0, 0, false); + map.add(vector[1], 1); + map.borrow_mut(&vector[1]); + map.destroy_and_validate(); + } + + #[test] + #[expected_failure(abort_code = 0x10003, location = Self)] /// EITER_OUT_OF_BOUNDS + fun test_abort_iter_borrow_key_missing() { + let map = new_from(vector[1], vector[1]); + map.new_end_iter().iter_borrow_key(); + map.destroy_and_validate(); + } + + #[test] + #[expected_failure(abort_code = 0x10003, location = Self)] /// EITER_OUT_OF_BOUNDS + fun test_abort_iter_borrow_missing() { + let map = new_from(vector[1], vector[1]); + map.new_end_iter().iter_borrow(&map); + map.destroy_and_validate(); + } + + #[test] + #[expected_failure(abort_code = 0x10003, location = Self)] /// EITER_OUT_OF_BOUNDS + fun test_abort_iter_borrow_mut_missing() { + let map = new_from(vector[1], vector[1]); + map.new_end_iter().iter_borrow_mut(&mut map); + map.destroy_and_validate(); + } + + #[test] + #[expected_failure(abort_code = 0x1000E, location = Self)] 
/// EBORROW_MUT_REQUIRES_CONSTANT_VALUE_SIZE + fun test_abort_iter_borrow_mut_requires_constant_kv_size() { + let map = new_with_config(0, 0, false); + map.add(1, vector[1]); + map.new_begin_iter().iter_borrow_mut(&mut map); + map.destroy_and_validate(); + } + + #[test] + #[expected_failure(abort_code = 0x10003, location = Self)] /// EITER_OUT_OF_BOUNDS + fun test_abort_end_iter_next() { + let map = new_from(vector[1, 2, 3], vector[1, 2, 3]); + map.new_end_iter().iter_next(&map); + map.destroy_and_validate(); + } + + #[test] + #[expected_failure(abort_code = 0x10003, location = Self)] /// EITER_OUT_OF_BOUNDS + fun test_abort_begin_iter_prev() { + let map = new_from(vector[1, 2, 3], vector[1, 2, 3]); + map.new_begin_iter().iter_prev(&map); + map.destroy_and_validate(); + } + + #[test] + #[expected_failure(abort_code = 0x1000C, location = Self)] /// EMAP_NOT_EMPTY + fun test_abort_fail_to_destroy_non_empty() { + let map = new_from(vector[1], vector[1]); + map.destroy_empty(); + } + + #[test] + fun test_default_allows_5kb() { + let map = new_with_config(0, 0, false); + map.add(vector[1u8], 1); + // default guarantees key up to 5KB + map.add(vector_bytes_range(0, 5000), 1); + map.destroy_and_validate(); + + let map = new_with_config(0, 0, false); + // default guarantees (key, value) pair up to 10KB + map.add(1, vector[1u8]); + map.add(2, vector_bytes_range(0, 10000)); + map.destroy_and_validate(); + } + + #[test] + #[expected_failure(abort_code = 0x1000F, location = Self)] /// EKEY_BYTES_TOO_LARGE + fun test_adding_key_too_large() { + let map = new_with_config(0, 0, false); + map.add(vector[1u8], 1); + // default guarantees key up to 5KB + map.add(vector_bytes_range(0, 5200), 1); + map.destroy_and_validate(); + } + + #[test] + #[expected_failure(abort_code = 0x1000D, location = Self)] /// EARGUMENT_BYTES_TOO_LARGE + fun test_adding_value_too_large() { + let map = new_with_config(0, 0, false); + // default guarantees (key, value) pair up to 10KB + map.add(1, vector[1u8]); + map.add(2, vector_bytes_range(0, 12000)); + map.destroy_and_validate(); + } + + #[test_only] + inline fun comparison_test(repeats: u64, inner_max_degree: u16, leaf_max_degree: u16, reuse_slots: bool, next_1: ||u64, next_2: ||u64) { + let big_map = new_with_config(inner_max_degree, leaf_max_degree, reuse_slots); + if (reuse_slots) { + big_map.allocate_spare_slots(4); + }; + let small_map = ordered_map::new(); + for (i in 0..repeats) { + let is_insert = if (2 * i < repeats) { + i % 3 != 2 + } else { + i % 3 == 0 + }; + if (is_insert) { + let v = next_1(); + assert!(big_map.upsert(v, v) == small_map.upsert(v, v), i); + } else { + let v = next_2(); + assert!(big_map.remove(&v) == small_map.remove(&v), i); + }; + if ((i + 1) % 50 == 0) { + big_map.validate_map(); + + let big_iter = big_map.new_begin_iter(); + let small_iter = small_map.new_begin_iter(); + while (!big_iter.iter_is_end(&big_map) || !small_iter.iter_is_end(&small_map)) { + assert!(big_iter.iter_borrow_key() == small_iter.iter_borrow_key(&small_map), i); + assert!(big_iter.iter_borrow(&big_map) == small_iter.iter_borrow(&small_map), i); + big_iter = big_iter.iter_next(&big_map); + small_iter = small_iter.iter_next(&small_map); + }; + }; + }; + big_map.destroy_and_validate(); + } + + #[test_only] + const OFFSET: u64 = 270001; + #[test_only] + const MOD: u64 = 1000000; + + #[test] + fun test_comparison_random() { + let x = 1234; + let y = 1234; + comparison_test(500, 5, 5, false, + || { + x += OFFSET; + if (x > MOD) { x -= MOD}; + x + }, + || { + y += OFFSET; + if (y > 
MOD) { y -= MOD}; + y + }, + ); + } + + #[test] + fun test_comparison_increasing() { + let x = 0; + let y = 0; + comparison_test(500, 5, 5, false, + || { + x += 1; + x + }, + || { + y += 1; + y + }, + ); + } + + #[test] + fun test_comparison_decreasing() { + let x = 100000; + let y = 100000; + comparison_test(500, 5, 5, false, + || { + x -= 1; + x + }, + || { + y -= 1; + y + }, + ); + } + + #[test_only] + fun test_large_data_set_helper(inner_max_degree: u16, leaf_max_degree: u16, reuse_slots: bool) { + use std::vector; + + let map = new_with_config(inner_max_degree, leaf_max_degree, reuse_slots); + if (reuse_slots) { + map.allocate_spare_slots(4); + }; + let data = ordered_map::large_dataset(); + let shuffled_data = ordered_map::large_dataset_shuffled(); + + let len = data.length(); + for (i in 0..len) { + let element = data[i]; + map.upsert(element, element); + if (i % 7 == 0) { + map.validate_map(); + } + }; + + for (i in 0..len) { + let element = shuffled_data.borrow(i); + let it = map.find(element); + assert!(!it.iter_is_end(&map), i); + assert!(it.iter_borrow_key() == element, i); + + // aptos_std::debug::print(&it); + + let it_next = it.iter_next(&map); + let it_after = map.lower_bound(&(*element + 1)); + + // aptos_std::debug::print(&it_next); + // aptos_std::debug::print(&it_after); + // aptos_std::debug::print(&std::string::utf8(b"bla")); + + assert!(it_next == it_after, i); + }; + + let removed = vector::empty(); + for (i in 0..len) { + let element = shuffled_data.borrow(i); + if (!removed.contains(element)) { + removed.push_back(*element); + map.remove(element); + if (i % 7 == 1) { + map.validate_map(); + + } + } else { + assert!(!map.contains(element)); + }; + }; + + map.destroy_empty(); + } + + // Currently ignored long / more extensive tests. 
+ + // #[test] + // fun test_large_data_set_order_5_false() { + // test_large_data_set_helper(5, 5, false); + // } + + // #[test] + // fun test_large_data_set_order_5_true() { + // test_large_data_set_helper(5, 5, true); + // } + + // #[test] + // fun test_large_data_set_order_4_3_false() { + // test_large_data_set_helper(4, 3, false); + // } + + // #[test] + // fun test_large_data_set_order_4_3_true() { + // test_large_data_set_helper(4, 3, true); + // } + + // #[test] + // fun test_large_data_set_order_4_4_false() { + // test_large_data_set_helper(4, 4, false); + // } + + // #[test] + // fun test_large_data_set_order_4_4_true() { + // test_large_data_set_helper(4, 4, true); + // } + + // #[test] + // fun test_large_data_set_order_6_false() { + // test_large_data_set_helper(6, 6, false); + // } + + // #[test] + // fun test_large_data_set_order_6_true() { + // test_large_data_set_helper(6, 6, true); + // } + + // #[test] + // fun test_large_data_set_order_6_3_false() { + // test_large_data_set_helper(6, 3, false); + // } + + #[test] + fun test_large_data_set_order_6_3_true() { + test_large_data_set_helper(6, 3, true); + } + + #[test] + fun test_large_data_set_order_4_6_false() { + test_large_data_set_helper(4, 6, false); + } + + // #[test] + // fun test_large_data_set_order_4_6_true() { + // test_large_data_set_helper(4, 6, true); + // } + + // #[test] + // fun test_large_data_set_order_16_false() { + // test_large_data_set_helper(16, 16, false); + // } + + // #[test] + // fun test_large_data_set_order_16_true() { + // test_large_data_set_helper(16, 16, true); + // } + + // #[test] + // fun test_large_data_set_order_31_false() { + // test_large_data_set_helper(31, 31, false); + // } + + // #[test] + // fun test_large_data_set_order_31_true() { + // test_large_data_set_helper(31, 31, true); + // } + + // #[test] + // fun test_large_data_set_order_31_3_false() { + // test_large_data_set_helper(31, 3, false); + // } + + // #[test] + // fun test_large_data_set_order_31_3_true() { + // test_large_data_set_helper(31, 3, true); + // } + + // #[test] + // fun test_large_data_set_order_31_5_false() { + // test_large_data_set_helper(31, 5, false); + // } + + // #[test] + // fun test_large_data_set_order_31_5_true() { + // test_large_data_set_helper(31, 5, true); + // } + + // #[test] + // fun test_large_data_set_order_32_false() { + // test_large_data_set_helper(32, 32, false); + // } + + // #[test] + // fun test_large_data_set_order_32_true() { + // test_large_data_set_helper(32, 32, true); + // } +} diff --git a/aptos-move/framework/aptos-framework/sources/datastructures/ordered_map.move b/aptos-move/framework/aptos-framework/sources/datastructures/ordered_map.move new file mode 100644 index 0000000000000..6b64fef04b2e1 --- /dev/null +++ b/aptos-move/framework/aptos-framework/sources/datastructures/ordered_map.move @@ -0,0 +1,1279 @@ +/// This module provides an implementation for an ordered map. +/// +/// Keys point to values, and each key in the map must be unique. +/// +/// Currently, one implementation is provided, backed by a single sorted vector. +/// +/// That means that keys can be found within O(log N) time. +/// Adds and removals take O(N) time, but the constant factor is small, +/// as it does only O(log N) comparisons, and does efficient mem-copy with vector operations. +/// +/// Additionally, it provides a way to lookup and iterate over sorted keys, making range query +/// take O(log N + R) time (where R is number of elements in the range). 
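+///
+/// Example (an illustrative sketch of typical usage; the shown results follow from the
+/// function documentation below):
+/// ```move
+/// let map = ordered_map::new<u64, u64>();
+/// map.add(1, 10);
+/// map.add(3, 30);
+/// map.upsert(3, 31);                            // returns option::some(30)
+/// assert!(map.borrow(&1) == &10);
+/// assert!(map.next_key(&1) == option::some(3));
+/// map.remove(&1);                               // returns 10
+/// ```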
+/// +/// Most methods operate with OrderedMap being `self`. +/// All methods that start with iter_*, operate with IteratorPtr being `self`. +/// +/// Uses cmp::compare for ordering, which compares primitive types natively, and uses common +/// lexicographical sorting for complex types. +/// +/// TODO: all iterator functions are public(friend) for now, so that they can be modified in a +/// backward incompatible way. Type is also named IteratorPtr, so that Iterator is free to use later. +/// They are waiting for Move improvement that will allow references to be part of the struct, +/// allowing cleaner iterator APIs. +/// +module aptos_std::ordered_map { + friend aptos_std::big_ordered_map; + + use std::vector; + + use std::option::{Self, Option}; + use std::cmp; + use std::error; + + /// Map key already exists + const EKEY_ALREADY_EXISTS: u64 = 1; + /// Map key is not found + const EKEY_NOT_FOUND: u64 = 2; + // Trying to do an operation on an IteratorPtr that would go out of bounds + const EITER_OUT_OF_BOUNDS: u64 = 3; + /// New key used in replace_key_inplace doesn't respect the order + const ENEW_KEY_NOT_IN_ORDER: u64 = 4; + + /// Individual entry holding (key, value) pair + struct Entry has drop, copy, store { + key: K, + value: V, + } + + /// The OrderedMap datastructure. + enum OrderedMap has drop, copy, store { + /// sorted-vector based implementation of OrderedMap + SortedVectorMap { + /// List of entries, sorted by key. + entries: vector>, + } + } + + /// An iterator pointing to a valid position in an ordered map, or to the end. + /// + /// TODO: Once fields can be (mutable) references, this class will be deprecated. + enum IteratorPtr has copy, drop { + End, + Position { + /// The index of the iterator pointing to. + index: u64, + }, + } + + /// Create a new empty OrderedMap, using default (SortedVectorMap) implementation. + public fun new(): OrderedMap { + OrderedMap::SortedVectorMap { + entries: vector::empty(), + } + } + + /// Create a OrderedMap from a vector of keys and values. + /// Aborts with EKEY_ALREADY_EXISTS if duplicate keys are passed in. + public fun new_from(keys: vector, values: vector): OrderedMap { + let map = new(); + map.add_all(keys, values); + map + } + + /// Number of elements in the map. + public fun length(self: &OrderedMap): u64 { + self.entries.length() + } + + /// Whether map is empty. + public fun is_empty(self: &OrderedMap): bool { + self.entries.is_empty() + } + + /// Add a key/value pair to the map. + /// Aborts with EKEY_ALREADY_EXISTS if key already exist. + public fun add(self: &mut OrderedMap, key: K, value: V) { + let len = self.entries.length(); + let index = binary_search(&key, &self.entries, 0, len); + + // key must not already be inside. + assert!(index >= len || &self.entries[index].key != &key, error::invalid_argument(EKEY_ALREADY_EXISTS)); + self.entries.insert(index, Entry { key, value }); + } + + /// If the key doesn't exist in the map, inserts the key/value, and returns none. + /// Otherwise, updates the value under the given key, and returns the old value. + public fun upsert(self: &mut OrderedMap, key: K, value: V): Option { + let len = self.entries.length(); + let index = binary_search(&key, &self.entries, 0, len); + + if (index < len && &self.entries[index].key == &key) { + let Entry { + key: _, + value: old_value, + } = self.entries.replace(index, Entry { key, value }); + option::some(old_value) + } else { + self.entries.insert(index, Entry { key, value }); + option::none() + } + } + + /// Remove a key/value pair from the map. 
+ /// Aborts with EKEY_NOT_FOUND if `key` doesn't exist. + public fun remove(self: &mut OrderedMap, key: &K): V { + let len = self.entries.length(); + let index = binary_search(key, &self.entries, 0, len); + assert!(index < len, error::invalid_argument(EKEY_NOT_FOUND)); + let Entry { key: old_key, value } = self.entries.remove(index); + assert!(key == &old_key, error::invalid_argument(EKEY_NOT_FOUND)); + value + } + + /// Returns whether map contains a given key. + public fun contains(self: &OrderedMap, key: &K): bool { + !self.find(key).iter_is_end(self) + } + + public fun borrow(self: &OrderedMap, key: &K): &V { + self.find(key).iter_borrow(self) + } + + public fun borrow_mut(self: &mut OrderedMap, key: &K): &mut V { + self.find(key).iter_borrow_mut(self) + } + + /// Changes the key, while keeping the same value attached to it + /// Aborts with EKEY_NOT_FOUND if `old_key` doesn't exist. + /// Aborts with ENEW_KEY_NOT_IN_ORDER if `new_key` doesn't keep the order `old_key` was in. + public(friend) fun replace_key_inplace(self: &mut OrderedMap, old_key: &K, new_key: K) { + let len = self.entries.length(); + let index = binary_search(old_key, &self.entries, 0, len); + assert!(index < len, error::invalid_argument(EKEY_NOT_FOUND)); + + assert!(old_key == &self.entries[index].key, error::invalid_argument(EKEY_NOT_FOUND)); + + // check that after we update the key, order is going to be respected + if (index > 0) { + assert!(cmp::compare(&self.entries[index - 1].key, &new_key).is_lt(), error::invalid_argument(ENEW_KEY_NOT_IN_ORDER)) + }; + + if (index + 1 < len) { + assert!(cmp::compare(&new_key, &self.entries[index + 1].key).is_lt(), error::invalid_argument(ENEW_KEY_NOT_IN_ORDER)) + }; + + self.entries[index].key = new_key; + } + + /// Add multiple key/value pairs to the map. The keys must not already exist. + /// Aborts with EKEY_ALREADY_EXISTS if key already exist, or duplicate keys are passed in. + public fun add_all(self: &mut OrderedMap, keys: vector, values: vector) { + // TODO: Can be optimized, by sorting keys and values, and then creating map. + keys.zip(values, |key, value| { + self.add(key, value); + }); + } + + /// Add multiple key/value pairs to the map, overwrites values if they exist already, + /// or if duplicate keys are passed in. + public fun upsert_all(self: &mut OrderedMap, keys: vector, values: vector) { + // TODO: Can be optimized, by sorting keys and values, and then creating map. + keys.zip(values, |key, value| { + self.upsert(key, value); + }); + } + + /// Takes all elements from `other` and adds them to `self`, + /// overwritting if any key is already present in self. + public fun append(self: &mut OrderedMap, other: OrderedMap) { + self.append_impl(other); + } + + /// Takes all elements from `other` and adds them to `self`. + /// Aborts with EKEY_ALREADY_EXISTS if `other` has a key already present in `self`. + public fun append_disjoint(self: &mut OrderedMap, other: OrderedMap) { + let overwritten = self.append_impl(other); + assert!(overwritten.length() == 0, error::invalid_argument(EKEY_ALREADY_EXISTS)); + overwritten.destroy_empty(); + } + + /// Takes all elements from `other` and adds them to `self`, returning list of entries in self that were overwritten. 
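+    /// For example (illustrative values): appending `{2 => 20, 3 => 31}` into `{1 => 10, 3 => 30}`
+    /// leaves `self` as `{1 => 10, 2 => 20, 3 => 31}` and returns the single overwritten
+    /// entry `(3, 30)`.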
+ fun append_impl(self: &mut OrderedMap, other: OrderedMap): vector> { + let OrderedMap::SortedVectorMap { + entries: other_entries, + } = other; + let overwritten = vector::empty(); + + if (other_entries.is_empty()) { + other_entries.destroy_empty(); + return overwritten; + }; + + if (self.entries.is_empty()) { + self.entries.append(other_entries); + return overwritten; + }; + + // Optimization: if all elements in `other` are larger than all elements in `self`, we can just move them over. + if (cmp::compare(&self.entries.borrow(self.entries.length() - 1).key, &other_entries.borrow(0).key).is_lt()) { + self.entries.append(other_entries); + return overwritten; + }; + + // In O(n), traversing from the back, build reverse sorted result, and then reverse it back + let reverse_result = vector::empty(); + let cur_i = self.entries.length() - 1; + let other_i = other_entries.length() - 1; + + // after the end of the loop, other_entries is empty, and any leftover is in entries + loop { + let ord = cmp::compare(&self.entries[cur_i].key, &other_entries[other_i].key); + if (ord.is_gt()) { + reverse_result.push_back(self.entries.pop_back()); + if (cur_i == 0) { + // make other_entries empty, and rest in entries. + // TODO cannot use mem::swap until it is public/released + // mem::swap(&mut self.entries, &mut other_entries); + self.entries.append(other_entries); + break; + } else { + cur_i -= 1; + }; + } else { + // is_lt or is_eq + if (ord.is_eq()) { + // we skip the entries one, and below put in the result one from other. + overwritten.push_back(self.entries.pop_back()); + }; + + reverse_result.push_back(other_entries.pop_back()); + if (other_i == 0) { + other_entries.destroy_empty(); + break; + } else { + other_i -= 1; + }; + }; + }; + + self.entries.reverse_append(reverse_result); + + overwritten + } + + /// Splits the collection into two, such to leave `self` with `at` number of elements. + /// Returns a newly allocated map containing the elements in the range [at, len). + /// After the call, the original map will be left containing the elements [0, at). 
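+    /// For example (illustrative values): trimming `{1 => 10, 2 => 20, 3 => 30}` at 2 leaves
+    /// `self` as `{1 => 10, 2 => 20}` and returns `{3 => 30}`.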
+ public fun trim(self: &mut OrderedMap, at: u64): OrderedMap { + let rest = self.entries.trim(at); + + OrderedMap::SortedVectorMap { + entries: rest + } + } + + public fun borrow_front(self: &OrderedMap): (&K, &V) { + let entry = self.entries.borrow(0); + (&entry.key, &entry.value) + } + + public fun borrow_back(self: &OrderedMap): (&K, &V) { + let entry = self.entries.borrow(self.entries.length() - 1); + (&entry.key, &entry.value) + } + + public fun pop_front(self: &mut OrderedMap): (K, V) { + let Entry { key, value } = self.entries.remove(0); + (key, value) + } + + public fun pop_back(self: &mut OrderedMap): (K, V) { + let Entry { key, value } = self.entries.pop_back(); + (key, value) + } + + public fun prev_key(self: &OrderedMap, key: &K): Option { + let it = self.lower_bound(key); + if (it.iter_is_begin(self)) { + option::none() + } else { + option::some(*it.iter_prev(self).iter_borrow_key(self)) + } + } + + public fun next_key(self: &OrderedMap, key: &K): Option { + let it = self.lower_bound(key); + if (it.iter_is_end(self)) { + option::none() + } else { + let cur_key = it.iter_borrow_key(self); + if (key == cur_key) { + let it = it.iter_next(self); + if (it.iter_is_end(self)) { + option::none() + } else { + option::some(*it.iter_borrow_key(self)) + } + } else { + option::some(*cur_key) + } + } + } + + // TODO: see if it is more understandable if iterator points between elements, + // and there is iter_borrow_next and iter_borrow_prev, and provide iter_insert. + + /// Returns an iterator pointing to the first element that is greater or equal to the provided + /// key, or an end iterator if such element doesn't exist. + public(friend) fun lower_bound(self: &OrderedMap, key: &K): IteratorPtr { + let entries = &self.entries; + let len = entries.length(); + + let index = binary_search(key, entries, 0, len); + if (index == len) { + self.new_end_iter() + } else { + new_iter(index) + } + } + + /// Returns an iterator pointing to the element that equals to the provided key, or an end + /// iterator if the key is not found. + public(friend) fun find(self: &OrderedMap, key: &K): IteratorPtr { + let lower_bound = self.lower_bound(key); + if (lower_bound.iter_is_end(self)) { + lower_bound + } else if (lower_bound.iter_borrow_key(self) == key) { + lower_bound + } else { + self.new_end_iter() + } + } + + /// Returns the begin iterator. + public(friend) fun new_begin_iter(self: &OrderedMap): IteratorPtr { + if (self.is_empty()) { + return IteratorPtr::End; + }; + + new_iter(0) + } + + /// Returns the end iterator. + public(friend) fun new_end_iter(self: &OrderedMap): IteratorPtr { + IteratorPtr::End + } + + // ========== Section for methods opearting on iterators ======== + // Note: After any modifications to the map, do not use any of the iterators obtained beforehand. + // Operations on iterators after map is modified are unexpected/incorrect. + + /// Returns the next iterator, or none if already at the end iterator. + /// Note: Requires that the map is not changed after the input iterator is generated. + public(friend) fun iter_next(self: IteratorPtr, map: &OrderedMap): IteratorPtr { + assert!(!self.iter_is_end(map), error::invalid_argument(EITER_OUT_OF_BOUNDS)); + + let index = self.index + 1; + if (index < map.entries.length()) { + new_iter(index) + } else { + map.new_end_iter() + } + } + + /// Returns the previous iterator, or none if already at the begin iterator. + /// Note: Requires that the map is not changed after the input iterator is generated. 
+ public(friend) fun iter_prev(self: IteratorPtr, map: &OrderedMap): IteratorPtr { + assert!(!self.iter_is_begin(map), error::invalid_argument(EITER_OUT_OF_BOUNDS)); + + let index = if (self is IteratorPtr::End) { + map.entries.length() - 1 + } else { + self.index - 1 + }; + + new_iter(index) + } + + /// Returns whether the iterator is a begin iterator. + public(friend) fun iter_is_begin(self: &IteratorPtr, map: &OrderedMap): bool { + if (self is IteratorPtr::End) { + map.is_empty() + } else { + self.index == 0 + } + } + + /// Returns true iff the iterator is a begin iterator from a non-empty collection. + /// (I.e. if iterator points to a valid element) + /// This method doesn't require having access to map, unlike iter_is_begin. + public(friend) fun iter_is_begin_from_non_empty(self: &IteratorPtr): bool { + if (self is IteratorPtr::End) { + false + } else { + self.index == 0 + } + } + + /// Returns whether the iterator is an end iterator. + public(friend) fun iter_is_end(self: &IteratorPtr, _map: &OrderedMap): bool { + self is IteratorPtr::End + } + + /// Borrows the key given iterator points to. + /// Aborts with EITER_OUT_OF_BOUNDS if iterator is pointing to the end. + /// Note: Requires that the map is not changed after the input iterator is generated. + public(friend) fun iter_borrow_key(self: &IteratorPtr, map: &OrderedMap): &K { + assert!(!(self is IteratorPtr::End), error::invalid_argument(EITER_OUT_OF_BOUNDS)); + + &map.entries.borrow(self.index).key + } + + /// Borrows the value given iterator points to. + /// Aborts with EITER_OUT_OF_BOUNDS if iterator is pointing to the end. + /// Note: Requires that the map is not changed after the input iterator is generated. + public(friend) fun iter_borrow(self: IteratorPtr, map: &OrderedMap): &V { + assert!(!(self is IteratorPtr::End), error::invalid_argument(EITER_OUT_OF_BOUNDS)); + &map.entries.borrow(self.index).value + } + + /// Mutably borrows the value iterator points to. + /// Aborts with EITER_OUT_OF_BOUNDS if iterator is pointing to the end. + /// Note: Requires that the map is not changed after the input iterator is generated. + public(friend) fun iter_borrow_mut(self: IteratorPtr, map: &mut OrderedMap): &mut V { + assert!(!(self is IteratorPtr::End), error::invalid_argument(EITER_OUT_OF_BOUNDS)); + &mut map.entries.borrow_mut(self.index).value + } + + /// Removes (key, value) pair iterator points to, returning the previous value. + /// Aborts with EITER_OUT_OF_BOUNDS if iterator is pointing to the end. + /// Note: Requires that the map is not changed after the input iterator is generated. + public(friend) fun iter_remove(self: IteratorPtr, map: &mut OrderedMap): V { + assert!(!(self is IteratorPtr::End), error::invalid_argument(EITER_OUT_OF_BOUNDS)); + + let Entry { key: _, value } = map.entries.remove(self.index); + value + } + + /// Replaces the value iterator is pointing to, returning the previous value. + /// Aborts with EITER_OUT_OF_BOUNDS if iterator is pointing to the end. + /// Note: Requires that the map is not changed after the input iterator is generated. 
+ public(friend) fun iter_replace(self: IteratorPtr, map: &mut OrderedMap, value: V): V { + assert!(!(self is IteratorPtr::End), error::invalid_argument(EITER_OUT_OF_BOUNDS)); + + // TODO once mem::replace is public/released, update to: + // let entry = map.entries.borrow_mut(self.index); + // mem::replace(&mut entry.value, value) + let key = map.entries[self.index].key; + let Entry { + key: _, + value: prev_value, + } = map.entries.replace(self.index, Entry { key, value }); + prev_value + } + + /// Add key/value pair to the map, at the iterator position (before the element at the iterator position). + /// Aborts with ENEW_KEY_NOT_IN_ORDER is key is not larger than the key before the iterator, + /// or smaller than the key at the iterator position. + public(friend) fun iter_add(self: IteratorPtr, map: &mut OrderedMap, key: K, value: V) { + let len = map.entries.length(); + let insert_index = if (self is IteratorPtr::End) { + len + } else { + self.index + }; + + if (insert_index > 0) { + assert!(cmp::compare(&map.entries[insert_index - 1].key, &key).is_lt(), error::invalid_argument(ENEW_KEY_NOT_IN_ORDER)) + }; + + if (insert_index < len) { + assert!(cmp::compare(&key, &map.entries[insert_index].key).is_lt(), error::invalid_argument(ENEW_KEY_NOT_IN_ORDER)) + }; + + map.entries.insert(insert_index, Entry { key, value }); + } + + /// Destroys empty map. + /// Aborts if `self` is not empty. + public fun destroy_empty(self: OrderedMap) { + let OrderedMap::SortedVectorMap { entries } = self; + // assert!(entries.is_empty(), E_NOT_EMPTY); + entries.destroy_empty(); + } + + // ========= Section with views and inline for-loop methods ======= + + /// Return all keys in the map. This requires keys to be copyable. + public fun keys(self: &OrderedMap): vector { + self.entries.map_ref(|e| { + let e: &Entry = e; + e.key + }) + } + + /// Return all values in the map. This requires values to be copyable. + public fun values(self: &OrderedMap): vector { + self.entries.map_ref(|e| { + let e: &Entry = e; + e.value + }) + } + + /// Transform the map into two vectors with the keys and values respectively + /// Primarily used to destroy a map + public fun to_vec_pair(self: OrderedMap): (vector, vector) { + let keys: vector = vector::empty(); + let values: vector = vector::empty(); + let OrderedMap::SortedVectorMap { entries } = self; + entries.for_each(|e| { + let Entry { key, value } = e; + keys.push_back(key); + values.push_back(value); + }); + (keys, values) + } + + /// For maps that cannot be dropped this is a utility to destroy them + /// using lambdas to destroy the individual keys and values. + public inline fun destroy( + self: OrderedMap, + dk: |K|, + dv: |V| + ) { + let (keys, values) = self.to_vec_pair(); + keys.destroy(|_k| dk(_k)); + values.destroy(|_v| dv(_v)); + } + + /// Apply the function to each key-value pair in the map, consuming it. + public inline fun for_each( + self: OrderedMap, + f: |K, V| + ) { + let (keys, values) = self.to_vec_pair(); + keys.zip(values, |k, v| f(k, v)); + } + + /// Apply the function to a reference of each key-value pair in the map. + /// + /// Current implementation is O(n * log(n)). After function values will be optimized + /// to O(n). + public inline fun for_each_ref(self: &OrderedMap, f: |&K, &V|) { + // This implementation is innefficient: O(log(n)) for next_key / borrow lookups every time, + // but is the only one available through the public API. 
+ if (!self.is_empty()) { + let (k, v) = self.borrow_front(); + f(k, v); + + let cur_k = self.next_key(k); + while (cur_k.is_some()) { + let k = cur_k.destroy_some(); + f(&k, self.borrow(&k)); + + cur_k = self.next_key(&k); + }; + }; + + // TODO: if we make iterator api public update to: + // let iter = self.new_begin_iter(); + // while (!iter.iter_is_end(self)) { + // f(iter.iter_borrow_key(self), iter.iter_borrow(self)); + // iter = iter.iter_next(self); + // } + + // TODO: once move supports private functions udpate to: + // vector::for_each_ref( + // &self.entries, + // |entry| { + // f(&entry.key, &entry.value) + // } + // ); + } + + // TODO: Temporary friend implementaiton, until for_each_ref can be made efficient. + public(friend) inline fun for_each_ref_friend(self: &OrderedMap, f: |&K, &V|) { + let iter = self.new_begin_iter(); + while (!iter.iter_is_end(self)) { + f(iter.iter_borrow_key(self), iter.iter_borrow(self)); + iter = iter.iter_next(self); + } + } + + /// Apply the function to a mutable reference of each key-value pair in the map. + /// + /// Current implementation is O(n * log(n)). After function values will be optimized + /// to O(n). + public inline fun for_each_mut(self: &mut OrderedMap, f: |&K, &mut V|) { + // This implementation is innefficient: O(log(n)) for next_key / borrow lookups every time, + // but is the only one available through the public API. + if (!self.is_empty()) { + let (k, _v) = self.borrow_front(); + + let k = *k; + let done = false; + while (!done) { + f(&k, self.borrow_mut(&k)); + + let cur_k = self.next_key(&k); + if (cur_k.is_some()) { + k = cur_k.destroy_some(); + } else { + done = true; + } + }; + }; + + // TODO: if we make iterator api public update to: + // let iter = self.new_begin_iter(); + // while (!iter.iter_is_end(self)) { + // let key = *iter.iter_borrow_key(self); + // f(key, iter.iter_borrow_mut(self)); + // iter = iter.iter_next(self); + // } + + // TODO: once move supports private functions udpate to: + // vector::for_each_mut( + // &mut self.entries, + // |entry| { + // f(&mut entry.key, &mut entry.value) + // } + // ); + } + + // ========= Section with private methods =============== + + inline fun new_iter(index: u64): IteratorPtr { + IteratorPtr::Position { + index: index, + } + } + + // return index containing the key, or insert position. + // I.e. index of first element that has key larger or equal to the passed `key` argument. 
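+    // For example (illustrative values), with entries keyed [1, 3, 5]:
+    //   binary_search(&3, entries, 0, 3) == 1  (exact match)
+    //   binary_search(&4, entries, 0, 3) == 2  (insert position before key 5)
+    //   binary_search(&6, entries, 0, 3) == 3  (== end, i.e. insert at the back)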
+ fun binary_search(key: &K, entries: &vector>, start: u64, end: u64): u64 { + let l = start; + let r = end; + while (l != r) { + let mid = l + ((r - l) >> 1); + let comparison = cmp::compare(&entries.borrow(mid).key, key); + if (comparison.is_lt()) { + l = mid + 1; + } else { + r = mid; + }; + }; + l + } + + // see if useful, and add + // + // public fun iter_num_below(self: IteratorPtr, map: &OrderedMap): u64 { + // if (self.iter_is_end()) { + // map.entries.length() + // } else { + // self.index + // } + // } + + spec module { + pragma verify = false; + } + + // ================= Section for tests ===================== + + #[test_only] + fun print_map(self: &OrderedMap) { + aptos_std::debug::print(&self.entries); + } + + #[test_only] + public fun validate_ordered(self: &OrderedMap) { + let len = self.entries.length(); + let i = 1; + while (i < len) { + assert!(cmp::compare(&self.entries.borrow(i).key, &self.entries.borrow(i - 1).key).is_gt(), 1); + i += 1; + }; + } + + #[test_only] + fun validate_iteration(self: &OrderedMap) { + let expected_num_elements = self.length(); + let num_elements = 0; + let it = self.new_begin_iter(); + while (!it.iter_is_end(self)) { + num_elements += 1; + it = it.iter_next(self); + }; + assert!(num_elements == expected_num_elements, 2); + + let num_elements = 0; + let it = self.new_end_iter(); + while (!it.iter_is_begin(self)) { + it = it.iter_prev(self); + num_elements += 1; + }; + assert!(num_elements == expected_num_elements, 3); + } + + #[test_only] + fun validate_map(self: &OrderedMap) { + self.validate_ordered(); + self.validate_iteration(); + } + + #[test] + fun test_map_small() { + let map = new(); + map.validate_map(); + map.add(1, 1); + map.validate_map(); + map.add(2, 2); + map.validate_map(); + let r1 = map.upsert(3, 3); + map.validate_map(); + assert!(r1 == option::none(), 4); + map.add(4, 4); + map.validate_map(); + let r2 = map.upsert(4, 8); + map.validate_map(); + assert!(r2 == option::some(4), 5); + map.add(5, 5); + map.validate_map(); + map.add(6, 6); + map.validate_map(); + + map.remove(&5); + map.validate_map(); + map.remove(&4); + map.validate_map(); + map.remove(&1); + map.validate_map(); + map.remove(&3); + map.validate_map(); + map.remove(&2); + map.validate_map(); + map.remove(&6); + map.validate_map(); + + map.destroy_empty(); + } + + #[test] + fun test_add_remove_many() { + let map = new(); + + assert!(map.length() == 0, 0); + assert!(!map.contains(&3), 1); + map.add(3, 1); + assert!(map.length() == 1, 2); + assert!(map.contains(&3), 3); + assert!(map.borrow(&3) == &1, 4); + *map.borrow_mut(&3) = 2; + assert!(map.borrow(&3) == &2, 5); + + assert!(!map.contains(&2), 6); + map.add(2, 5); + assert!(map.length() == 2, 7); + assert!(map.contains(&2), 8); + assert!(map.borrow(&2) == &5, 9); + *map.borrow_mut(&2) = 9; + assert!(map.borrow(&2) == &9, 10); + + map.remove(&2); + assert!(map.length() == 1, 11); + assert!(!map.contains(&2), 12); + assert!(map.borrow(&3) == &2, 13); + + map.remove(&3); + assert!(map.length() == 0, 14); + assert!(!map.contains(&3), 15); + + map.destroy_empty(); + } + + #[test] + fun test_add_all() { + let map = new(); + + assert!(map.length() == 0, 0); + map.add_all(vector[2, 1, 3], vector[20, 10, 30]); + + assert!(map == new_from(vector[1, 2, 3], vector[10, 20, 30]), 1); + + assert!(map.length() == 3, 1); + assert!(map.borrow(&1) == &10, 2); + assert!(map.borrow(&2) == &20, 3); + assert!(map.borrow(&3) == &30, 4); + } + + #[test] + #[expected_failure(abort_code = 0x20002, location = Self)] /// 
EKEY_ALREADY_EXISTS + fun test_add_all_mismatch() { + new_from(vector[1, 3], vector[10]); + } + + #[test] + fun test_upsert_all() { + let map = new_from(vector[1, 3, 5], vector[10, 30, 50]); + map.upsert_all(vector[7, 2, 3], vector[70, 20, 35]); + assert!(map == new_from(vector[1, 2, 3, 5, 7], vector[10, 20, 35, 50, 70]), 1); + } + + #[test] + #[expected_failure(abort_code = 0x10001, location = Self)] /// EKEY_ALREADY_EXISTS + fun test_new_from_duplicate() { + new_from(vector[1, 3, 1, 5], vector[10, 30, 11, 50]); + } + + #[test] + #[expected_failure(abort_code = 0x20002, location = Self)] /// EKEY_ALREADY_EXISTS + fun test_upsert_all_mismatch() { + let map = new_from(vector[1, 3, 5], vector[10, 30, 50]); + map.upsert_all(vector[2], vector[20, 35]); + } + + #[test] + fun test_to_vec_pair() { + let (keys, values) = new_from(vector[3, 1, 5], vector[30, 10, 50]).to_vec_pair(); + assert!(keys == vector[1, 3, 5], 1); + assert!(values == vector[10, 30, 50], 2); + } + + #[test] + fun test_keys() { + let map = new(); + assert!(map.keys() == vector[], 0); + map.add(2, 1); + map.add(3, 1); + + assert!(map.keys() == vector[2, 3], 0); + } + + #[test] + fun test_values() { + let map = new(); + assert!(map.values() == vector[], 0); + map.add(2, 1); + map.add(3, 2); + + assert!(map.values() == vector[1, 2], 0); + } + + #[test] + fun test_for_each_variants() { + let keys = vector[1, 3, 5]; + let values = vector[10, 30, 50]; + let map = new_from(keys, values); + + let index = 0; + map.for_each_ref(|k, v| { + assert!(keys[index] == *k); + assert!(values[index] == *v); + index += 1; + }); + + let index = 0; + map.for_each_mut(|k, v| { + assert!(keys[index] == *k); + assert!(values[index] == *v); + *v += 1; + index += 1; + }); + + let index = 0; + map.for_each(|k, v| { + assert!(keys[index] == k); + assert!(values[index] + 1 == v); + index += 1; + }); + } + + #[test] + #[expected_failure(abort_code = 0x10001, location = Self)] /// EKEY_ALREADY_EXISTS + fun test_add_twice() { + let map = new(); + map.add(3, 1); + map.add(3, 1); + + map.remove(&3); + map.destroy_empty(); + } + + #[test] + #[expected_failure(abort_code = 0x10002, location = Self)] /// EKEY_NOT_FOUND + fun test_remove_twice_1() { + let map = new(); + map.add(3, 1); + map.remove(&3); + map.remove(&3); + + map.destroy_empty(); + } + + #[test] + #[expected_failure(abort_code = 0x10002, location = Self)] /// EKEY_NOT_FOUND + fun test_remove_twice_2() { + let map = new(); + map.add(3, 1); + map.add(4, 1); + map.remove(&3); + map.remove(&3); + + map.destroy_empty(); + } + + #[test] + fun test_upsert_test() { + let map = new(); + // test adding 3 elements using upsert + map.upsert::(1, 1); + map.upsert(2, 2); + map.upsert(3, 3); + + assert!(map.length() == 3, 0); + assert!(map.contains(&1), 1); + assert!(map.contains(&2), 2); + assert!(map.contains(&3), 3); + assert!(map.borrow(&1) == &1, 4); + assert!(map.borrow(&2) == &2, 5); + assert!(map.borrow(&3) == &3, 6); + + // change mapping 1->1 to 1->4 + map.upsert(1, 4); + + assert!(map.length() == 3, 7); + assert!(map.contains(&1), 8); + assert!(map.borrow(&1) == &4, 9); + } + + #[test] + fun test_append() { + { + let map = new(); + let other = new(); + map.append(other); + assert!(map.is_empty(), 0); + }; + { + let map = new_from(vector[1, 2], vector[10, 20]); + let other = new(); + map.append(other); + assert!(map == new_from(vector[1, 2], vector[10, 20]), 1); + }; + { + let map = new(); + let other = new_from(vector[1, 2], vector[10, 20]); + map.append(other); + assert!(map == new_from(vector[1, 2], 
vector[10, 20]), 2); + }; + { + let map = new_from(vector[1, 2, 3], vector[10, 20, 30]); + let other = new_from(vector[4, 5], vector[40, 50]); + map.append(other); + assert!(map == new_from(vector[1, 2, 3, 4, 5], vector[10, 20, 30, 40, 50]), 3); + }; + { + let map = new_from(vector[1, 3, 5], vector[10, 30, 50]); + let other = new_from(vector[2, 4], vector[20, 40]); + map.append(other); + assert!(map == new_from(vector[1, 2, 3, 4, 5], vector[10, 20, 30, 40, 50]), 4); + }; + { + let map = new_from(vector[2, 4], vector[20, 40]); + let other = new_from(vector[1, 3, 5], vector[10, 30, 50]); + map.append(other); + assert!(map == new_from(vector[1, 2, 3, 4, 5], vector[10, 20, 30, 40, 50]), 6); + }; + { + let map = new_from(vector[1], vector[10]); + let other = new_from(vector[1], vector[11]); + map.append(other); + assert!(map == new_from(vector[1], vector[11]), 7); + } + } + + #[test] + fun test_append_disjoint() { + let map = new_from(vector[1, 2, 3], vector[10, 20, 30]); + let other = new_from(vector[4, 5], vector[40, 50]); + map.append_disjoint(other); + assert!(map == new_from(vector[1, 2, 3, 4, 5], vector[10, 20, 30, 40, 50]), 1); + } + + #[test] + #[expected_failure(abort_code = 0x10001, location = Self)] /// EKEY_ALREADY_EXISTS + fun test_append_disjoint_abort() { + let map = new_from(vector[1], vector[10]); + let other = new_from(vector[1], vector[11]); + map.append_disjoint(other); + } + + #[test] + fun test_trim() { + let map = new_from(vector[1, 2, 3], vector[10, 20, 30]); + let rest = map.trim(2); + assert!(map == new_from(vector[1, 2], vector[10, 20]), 1); + assert!(rest == new_from(vector[3], vector[30]), 2); + } + + #[test] + fun test_non_iterator_ordering() { + let map = new_from(vector[1, 2, 3], vector[10, 20, 30]); + assert!(map.prev_key(&1).is_none(), 1); + assert!(map.next_key(&1) == option::some(2), 1); + + assert!(map.prev_key(&2) == option::some(1), 2); + assert!(map.next_key(&2) == option::some(3), 3); + + assert!(map.prev_key(&3) == option::some(2), 4); + assert!(map.next_key(&3).is_none(), 5); + + let (front_k, front_v) = map.borrow_front(); + assert!(front_k == &1, 6); + assert!(front_v == &10, 7); + + let (back_k, back_v) = map.borrow_back(); + assert!(back_k == &3, 8); + assert!(back_v == &30, 9); + + let (front_k, front_v) = map.pop_front(); + assert!(front_k == 1, 10); + assert!(front_v == 10, 11); + + let (back_k, back_v) = map.pop_back(); + assert!(back_k == 3, 12); + assert!(back_v == 30, 13); + } + + #[test] + fun test_replace_key_inplace() { + let map = new_from(vector[1, 3, 5], vector[10, 30, 50]); + map.replace_key_inplace(&5, 6); + assert!(map == new_from(vector[1, 3, 6], vector[10, 30, 50]), 1); + map.replace_key_inplace(&3, 4); + assert!(map == new_from(vector[1, 4, 6], vector[10, 30, 50]), 2); + map.replace_key_inplace(&1, 0); + assert!(map == new_from(vector[0, 4, 6], vector[10, 30, 50]), 3); + } + + #[test] + #[expected_failure(abort_code = 0x10002, location = Self)] /// EKEY_NOT_FOUND + fun test_replace_key_inplace_not_found_1() { + let map = new_from(vector[1, 3, 6], vector[10, 30, 50]); + map.replace_key_inplace(&4, 5); + + } + + #[test] + #[expected_failure(abort_code = 0x10002, location = Self)] /// EKEY_NOT_FOUND + fun test_replace_key_inplace_not_found_2() { + let map = new_from(vector[1, 3, 6], vector[10, 30, 50]); + map.replace_key_inplace(&7, 8); + } + + #[test] + #[expected_failure(abort_code = 0x10004, location = Self)] /// ENEW_KEY_NOT_IN_ORDER + fun test_replace_key_inplace_not_in_order_1() { + let map = new_from(vector[1, 3, 6], 
vector[10, 30, 50]); + map.replace_key_inplace(&3, 7); + } + + #[test] + #[expected_failure(abort_code = 0x10004, location = Self)] /// ENEW_KEY_NOT_IN_ORDER + fun test_replace_key_inplace_not_in_order_2() { + let map = new_from(vector[1, 3, 6], vector[10, 30, 50]); + map.replace_key_inplace(&1, 3); + } + + #[test] + #[expected_failure(abort_code = 0x10004, location = Self)] /// ENEW_KEY_NOT_IN_ORDER + fun test_replace_key_inplace_not_in_order_3() { + let map = new_from(vector[1, 3, 6], vector[10, 30, 50]); + map.replace_key_inplace(&6, 3); + } + + #[test] + #[expected_failure(abort_code = 0x10003, location = Self)] /// EITER_OUT_OF_BOUNDS + public fun test_iter_end_next_abort() { + let map = new_from(vector[1, 3, 5], vector[10, 30, 50]); + map.new_end_iter().iter_next(&map); + } + + #[test] + #[expected_failure(abort_code = 0x10003, location = Self)] /// EITER_OUT_OF_BOUNDS + public fun test_iter_end_borrow_key_abort() { + let map = new_from(vector[1, 3, 5], vector[10, 30, 50]); + map.new_end_iter().iter_borrow_key(&map); + } + + #[test] + #[expected_failure(abort_code = 0x10003, location = Self)] /// EITER_OUT_OF_BOUNDS + public fun test_iter_end_borrow_abort() { + let map = new_from(vector[1, 3, 5], vector[10, 30, 50]); + map.new_end_iter().iter_borrow(&map); + } + + #[test] + #[expected_failure(abort_code = 0x10003, location = Self)] /// EITER_OUT_OF_BOUNDS + public fun test_iter_end_borrow_mut_abort() { + let map = new_from(vector[1, 3, 5], vector[10, 30, 50]); + map.new_end_iter().iter_borrow_mut(&mut map); + } + + #[test] + #[expected_failure(abort_code = 0x10003, location = Self)] /// EITER_OUT_OF_BOUNDS + public fun test_iter_begin_prev_abort() { + let map = new_from(vector[1, 3, 5], vector[10, 30, 50]); + map.new_begin_iter().iter_prev(&map); + } + + #[test] + public fun test_iter_is_begin_from_non_empty() { + let map = new_from(vector[1, 3, 5], vector[10, 30, 50]); + let iter = map.new_begin_iter(); + assert!(iter.iter_is_begin(&map), 1); + assert!(iter.iter_is_begin_from_non_empty(), 1); + + iter = iter.iter_next(&map); + assert!(!iter.iter_is_begin(&map), 1); + assert!(!iter.iter_is_begin_from_non_empty(), 1); + + let map = new(); + let iter = map.new_begin_iter(); + assert!(iter.iter_is_begin(&map), 1); + assert!(!iter.iter_is_begin_from_non_empty(), 1); + } + + #[test] + public fun test_iter_remove() { + let map = new_from(vector[1, 3, 5], vector[10, 30, 50]); + map.new_begin_iter().iter_next(&map).iter_remove(&mut map); + assert!(map == new_from(vector[1, 5], vector[10, 50]), 1); + } + + #[test] + #[expected_failure(abort_code = 0x10003, location = Self)] /// EITER_OUT_OF_BOUNDS + public fun test_iter_remove_abort() { + let map = new_from(vector[1, 3, 5], vector[10, 30, 50]); + map.new_end_iter().iter_remove(&mut map); + } + + #[test] + public fun test_iter_replace() { + let map = new_from(vector[1, 3, 5], vector[10, 30, 50]); + map.new_begin_iter().iter_next(&map).iter_replace(&mut map, 35); + assert!(map == new_from(vector[1, 3, 5], vector[10, 35, 50]), 1); + } + + #[test] + #[expected_failure(abort_code = 0x10003, location = Self)] /// EITER_OUT_OF_BOUNDS + public fun test_iter_replace_abort() { + let map = new_from(vector[1, 3, 5], vector[10, 30, 50]); + map.new_end_iter().iter_replace(&mut map, 35); + } + + #[test] + public fun test_iter_add() { + { + let map = new_from(vector[1, 3, 5], vector[10, 30, 50]); + map.new_begin_iter().iter_add(&mut map, 0, 5); + assert!(map == new_from(vector[0, 1, 3, 5], vector[5, 10, 30, 50]), 1); + }; + { + let map = new_from(vector[1, 
3, 5], vector[10, 30, 50]); + map.new_begin_iter().iter_next(&map).iter_add(&mut map, 2, 20); + assert!(map == new_from(vector[1, 2, 3, 5], vector[10, 20, 30, 50]), 2); + }; + { + let map = new_from(vector[1, 3, 5], vector[10, 30, 50]); + map.new_end_iter().iter_add(&mut map, 6, 60); + assert!(map == new_from(vector[1, 3, 5, 6], vector[10, 30, 50, 60]), 3); + }; + { + let map = new(); + map.new_end_iter().iter_add(&mut map, 1, 10); + assert!(map == new_from(vector[1], vector[10]), 4); + }; + } + + #[test] + #[expected_failure(abort_code = 0x10004, location = Self)] /// ENEW_KEY_NOT_IN_ORDER + public fun test_iter_add_abort_1() { + let map = new_from(vector[1, 3, 5], vector[10, 30, 50]); + map.new_begin_iter().iter_add(&mut map, 1, 5); + } + + #[test] + #[expected_failure(abort_code = 0x10004, location = Self)] /// ENEW_KEY_NOT_IN_ORDER + public fun test_iter_add_abort_2() { + let map = new_from(vector[1, 3, 5], vector[10, 30, 50]); + map.new_end_iter().iter_add(&mut map, 5, 55); + } + + #[test] + #[expected_failure(abort_code = 0x10004, location = Self)] /// ENEW_KEY_NOT_IN_ORDER + public fun test_iter_add_abort_3() { + let map = new_from(vector[1, 3, 5], vector[10, 30, 50]); + map.new_begin_iter().iter_next(&map).iter_add(&mut map, 1, 15); + } + + #[test] + #[expected_failure(abort_code = 0x10004, location = Self)] /// ENEW_KEY_NOT_IN_ORDER + public fun test_iter_add_abort_4() { + let map = new_from(vector[1, 3, 5], vector[10, 30, 50]); + map.new_begin_iter().iter_next(&map).iter_add(&mut map, 3, 25); + } + + #[test_only] + public fun large_dataset(): vector { + vector[383, 886, 777, 915, 793, 335, 386, 492, 649, 421, 362, 27, 690, 59, 763, 926, 540, 426, 172, 736, 211, 368, 567, 429, 782, 530, 862, 123, 67, 135, 929, 802, 22, 58, 69, 167, 393, 456, 11, 42, 229, 373, 421, 919, 784, 537, 198, 324, 315, 370, 413, 526, 91, 980, 956, 873, 862, 170, 996, 281, 305, 925, 84, 327, 336, 505, 846, 729, 313, 857, 124, 895, 582, 545, 814, 367, 434, 364, 43, 750, 87, 808, 276, 178, 788, 584, 403, 651, 754, 399, 932, 60, 676, 368, 739, 12, 226, 586, 94, 539, 795, 570, 434, 378, 467, 601, 97, 902, 317, 492, 652, 756, 301, 280, 286, 441, 865, 689, 444, 619, 440, 729, 31, 117, 97, 771, 481, 675, 709, 927, 567, 856, 497, 353, 586, 965, 306, 683, 219, 624, 528, 871, 732, 829, 503, 19, 270, 368, 708, 715, 340, 149, 796, 723, 618, 245, 846, 451, 921, 555, 379, 488, 764, 228, 841, 350, 193, 500, 34, 764, 124, 914, 987, 856, 743, 491, 227, 365, 859, 936, 432, 551, 437, 228, 275, 407, 474, 121, 858, 395, 29, 237, 235, 793, 818, 428, 143, 11, 928, 529] + } + + #[test_only] + public fun large_dataset_shuffled(): vector { + vector[895, 228, 530, 784, 624, 335, 729, 818, 373, 456, 914, 226, 368, 750, 428, 956, 437, 586, 763, 235, 567, 91, 829, 690, 434, 178, 584, 426, 228, 407, 237, 497, 764, 135, 124, 421, 537, 270, 11, 367, 378, 856, 529, 276, 729, 618, 929, 227, 149, 788, 925, 675, 121, 795, 306, 198, 421, 350, 555, 441, 403, 932, 368, 383, 928, 841, 440, 771, 364, 902, 301, 987, 467, 873, 921, 11, 365, 340, 739, 492, 540, 386, 919, 723, 539, 87, 12, 782, 324, 862, 689, 395, 488, 793, 709, 505, 582, 814, 245, 980, 936, 736, 619, 69, 370, 545, 764, 886, 305, 551, 19, 865, 229, 432, 29, 754, 34, 676, 43, 846, 451, 491, 871, 500, 915, 708, 586, 60, 280, 652, 327, 172, 856, 481, 796, 474, 219, 651, 170, 281, 84, 97, 715, 857, 353, 862, 393, 567, 368, 777, 97, 315, 526, 94, 31, 167, 123, 413, 503, 193, 808, 649, 143, 42, 444, 317, 67, 926, 434, 211, 379, 570, 683, 965, 732, 927, 429, 859, 313, 528, 996, 117, 492, 336, 
22, 399, 275, 802, 743, 124, 846, 58, 858, 286, 756, 601, 27, 59, 362, 793] + } + + #[test] + fun test_map_large() { + let map = new(); + let data = large_dataset(); + let shuffled_data = large_dataset_shuffled(); + + let len = data.length(); + for (i in 0..len) { + let element = *data.borrow(i); + map.upsert(element, element); + map.validate_map(); + }; + + for (i in 0..len) { + let element = shuffled_data.borrow(i); + let it = map.find(element); + assert!(!it.iter_is_end(&map), 6); + assert!(it.iter_borrow_key(&map) == element, 7); + + let it_next = it.iter_next(&map); + let it_after = map.lower_bound(&(*element + 1)); + + assert!(it_next == it_after, 8); + }; + + let removed = vector::empty(); + for (i in 0..len) { + let element = shuffled_data.borrow(i); + if (!removed.contains(element)) { + removed.push_back(*element); + map.remove(element); + map.validate_map(); + } else { + assert!(!map.contains(element)); + }; + }; + + map.destroy_empty(); + } +} diff --git a/aptos-move/framework/aptos-framework/sources/delegation_pool.move b/aptos-move/framework/aptos-framework/sources/delegation_pool.move index be1643ca6b197..7cfa8d440ae0c 100644 --- a/aptos-move/framework/aptos-framework/sources/delegation_pool.move +++ b/aptos-move/framework/aptos-framework/sources/delegation_pool.move @@ -124,6 +124,7 @@ module aptos_framework::delegation_pool { use aptos_framework::aptos_governance; use aptos_framework::coin; use aptos_framework::event::{Self, EventHandle, emit}; + use aptos_framework::permissioned_signer; use aptos_framework::stake; use aptos_framework::stake::get_operator; use aptos_framework::staking_config; @@ -215,6 +216,12 @@ module aptos_framework::delegation_pool { /// Cannot unlock the accumulated active stake of NULL_SHAREHOLDER(0x0). const ECANNOT_UNLOCK_NULL_SHAREHOLDER: u64 = 27; + /// Signer does not have permission to perform delegation logic. + const ENO_DELEGATION_PERMISSION: u64 = 28; + + /// Use delegator voting flow instead. Delegation pools can no longer specify a single delegated voter. 
+ const ECAN_NO_LONGER_SET_DELEGATED_VOTER: u64 = 29; + const MAX_U64: u64 = 18446744073709551615; /// Maximum operator percentage fee(of double digit precision): 22.85% is represented as 2285 @@ -346,6 +353,11 @@ module aptos_framework::delegation_pool { allowlist: SmartTable, } + enum DelegationPermission has copy, drop, store { + DelegationPoolManagementPermission, + StakeManagementPermission, + } + #[event] struct AddStake has drop, store { pool_address: address, @@ -832,6 +844,29 @@ module aptos_framework::delegation_pool { allowlist } + /// Permissions + inline fun check_delegation_pool_management_permission(s: &signer) { + assert!( + permissioned_signer::check_permission_exists(s, DelegationPermission::DelegationPoolManagementPermission {}), + error::permission_denied(ENO_DELEGATION_PERMISSION), + ); + } + + public fun grant_delegation_pool_management_permission(master: &signer, permissioned_signer: &signer) { + permissioned_signer::authorize_unlimited(master, permissioned_signer, DelegationPermission::DelegationPoolManagementPermission {}) + } + + inline fun check_stake_management_permission(s: &signer) { + assert!( + permissioned_signer::check_permission_exists(s, DelegationPermission::StakeManagementPermission {}), + error::permission_denied(ENO_DELEGATION_PERMISSION), + ); + } + + public fun grant_stake_management_permission(master: &signer, permissioned_signer: &signer) { + permissioned_signer::authorize_unlimited(master, permissioned_signer, DelegationPermission::StakeManagementPermission {}) + } + /// Initialize a delegation pool of custom fixed `operator_commission_percentage`. /// A resource account is created from `owner` signer and its supplied `delegation_pool_creation_seed` /// to host the delegation pool resource and own the underlying stake pool. @@ -841,7 +876,7 @@ module aptos_framework::delegation_pool { operator_commission_percentage: u64, delegation_pool_creation_seed: vector, ) acquires DelegationPool, GovernanceRecords, BeneficiaryForOperator, NextCommissionPercentage { - assert!(features::delegation_pools_enabled(), error::invalid_state(EDELEGATION_POOLS_DISABLED)); + check_delegation_pool_management_permission(owner); let owner_address = signer::address_of(owner); assert!(!owner_cap_exists(owner_address), error::already_exists(EOWNER_CAP_ALREADY_EXISTS)); assert!(operator_commission_percentage <= MAX_FEE, error::invalid_argument(EINVALID_COMMISSION_PERCENTAGE)); @@ -882,10 +917,7 @@ module aptos_framework::delegation_pool { move_to(owner, DelegationPoolOwnership { pool_address }); // All delegation pool enable partial governance voting by default once the feature flag is enabled. - if (features::partial_governance_voting_enabled( - ) && features::delegation_pool_partial_governance_voting_enabled()) { - enable_partial_governance_voting(pool_address); - } + enable_partial_governance_voting(pool_address); } #[view] @@ -903,11 +935,6 @@ module aptos_framework::delegation_pool { public entry fun enable_partial_governance_voting( pool_address: address, ) acquires DelegationPool, GovernanceRecords, BeneficiaryForOperator, NextCommissionPercentage { - assert!(features::partial_governance_voting_enabled(), error::invalid_state(EDISABLED_FUNCTION)); - assert!( - features::delegation_pool_partial_governance_voting_enabled(), - error::invalid_state(EDISABLED_FUNCTION) - ); assert_delegation_pool_exists(pool_address); // synchronize delegation and stake pools before any user operation. 
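A caller-side sketch of the new permission gate: for a permissioned signer, the stake entry points (add_stake, unlock, reactivate_stake, withdraw, vote, delegate_voting_power) now abort with error::permission_denied(ENO_DELEGATION_PERMISSION) unless the master signer has granted DelegationPermission::StakeManagementPermission. The snippet below is illustrative only; it assumes perm_signer is a permissioned signer previously derived from owner via the permissioned_signer module, which is not shown in this diff.

module 0xcafe::delegation_permission_example {
    use aptos_framework::delegation_pool;

    public fun stake_with_permissioned_signer(
        owner: &signer,
        perm_signer: &signer,   // assumed: permissioned signer derived from `owner`
        pool_address: address,
        amount: u64,
    ) {
        // Grant the stake-management permission first; without it the call below
        // aborts with ENO_DELEGATION_PERMISSION when `perm_signer` is permissioned.
        delegation_pool::grant_stake_management_permission(owner, perm_signer);
        delegation_pool::add_stake(perm_signer, pool_address, amount);
    }
}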
synchronize_delegation_pool(pool_address); @@ -941,6 +968,7 @@ module aptos_framework::delegation_pool { voting_power: u64, should_pass: bool ) acquires DelegationPool, GovernanceRecords, BeneficiaryForOperator, NextCommissionPercentage { + check_stake_management_permission(voter); assert_partial_governance_voting_enabled(pool_address); // synchronize delegation and stake pools before any user operation. synchronize_delegation_pool(pool_address); @@ -954,6 +982,7 @@ module aptos_framework::delegation_pool { if (voting_power > remaining_voting_power) { voting_power = remaining_voting_power; }; + aptos_governance::assert_proposal_expiration(pool_address, proposal_id); assert!(voting_power > 0, error::invalid_argument(ENO_VOTING_POWER)); let governance_records = borrow_global_mut(pool_address); @@ -975,18 +1004,18 @@ module aptos_framework::delegation_pool { should_pass, } ); + } else { + event::emit_event( + &mut governance_records.vote_events, + VoteEvent { + voter: voter_address, + proposal_id, + delegation_pool: pool_address, + num_votes: voting_power, + should_pass, + } + ); }; - - event::emit_event( - &mut governance_records.vote_events, - VoteEvent { - voter: voter_address, - proposal_id, - delegation_pool: pool_address, - num_votes: voting_power, - should_pass, - } - ); } /// A voter could create a governance proposal by this function. To successfully create a proposal, the voter's @@ -1000,6 +1029,7 @@ module aptos_framework::delegation_pool { metadata_hash: vector, is_multi_step_proposal: bool, ) acquires DelegationPool, GovernanceRecords, BeneficiaryForOperator, NextCommissionPercentage { + check_stake_management_permission(voter); assert_partial_governance_voting_enabled(pool_address); // synchronize delegation and stake pools before any user operation @@ -1032,16 +1062,16 @@ module aptos_framework::delegation_pool { delegation_pool: pool_address, } ); + } else { + event::emit_event( + &mut governance_records.create_proposal_events, + CreateProposalEvent { + proposal_id, + voter: voter_addr, + delegation_pool: pool_address, + } + ); }; - - event::emit_event( - &mut governance_records.create_proposal_events, - CreateProposalEvent { - proposal_id, - voter: voter_addr, - delegation_pool: pool_address, - } - ); } fun assert_owner_cap_exists(owner: address) { @@ -1292,6 +1322,7 @@ module aptos_framework::delegation_pool { owner: &signer, new_operator: address ) acquires DelegationPoolOwnership, DelegationPool, GovernanceRecords, BeneficiaryForOperator, NextCommissionPercentage { + check_delegation_pool_management_permission(owner); let pool_address = get_owned_pool_address(signer::address_of(owner)); // synchronize delegation and stake pools before any user operation // ensure the old operator is paid its uncommitted commission rewards @@ -1307,9 +1338,7 @@ module aptos_framework::delegation_pool { operator: &signer, new_beneficiary: address ) acquires BeneficiaryForOperator { - assert!(features::operator_beneficiary_change_enabled(), std::error::invalid_state( - EOPERATOR_BENEFICIARY_CHANGE_NOT_SUPPORTED - )); + check_stake_management_permission(operator); // The beneficiay address of an operator is stored under the operator's address. // So, the operator does not need to be validated with respect to a staking pool. 
let operator_addr = signer::address_of(operator); @@ -1332,9 +1361,7 @@ module aptos_framework::delegation_pool { owner: &signer, new_commission_percentage: u64 ) acquires DelegationPoolOwnership, DelegationPool, GovernanceRecords, BeneficiaryForOperator, NextCommissionPercentage { - assert!(features::commission_change_delegation_pool_enabled(), error::invalid_state( - ECOMMISSION_RATE_CHANGE_NOT_SUPPORTED - )); + check_delegation_pool_management_permission(owner); assert!(new_commission_percentage <= MAX_FEE, error::invalid_argument(EINVALID_COMMISSION_PERCENTAGE)); let owner_address = signer::address_of(owner); let pool_address = get_owned_pool_address(owner_address); @@ -1372,20 +1399,13 @@ module aptos_framework::delegation_pool { }); } - /// Allows an owner to change the delegated voter of the underlying stake pool. + #[deprecated] + /// Deprecated. Use the partial governance voting flow instead. public entry fun set_delegated_voter( - owner: &signer, - new_voter: address - ) acquires DelegationPoolOwnership, DelegationPool, GovernanceRecords, BeneficiaryForOperator, NextCommissionPercentage { - // No one can change delegated_voter once the partial governance voting feature is enabled. - assert!( - !features::delegation_pool_partial_governance_voting_enabled(), - error::invalid_state(EDEPRECATED_FUNCTION) - ); - let pool_address = get_owned_pool_address(signer::address_of(owner)); - // synchronize delegation and stake pools before any user operation - synchronize_delegation_pool(pool_address); - stake::set_delegated_voter(&retrieve_stake_pool_owner(borrow_global(pool_address)), new_voter); + _owner: &signer, + _new_voter: address + ) { + abort ECAN_NO_LONGER_SET_DELEGATED_VOTER } /// Allows a delegator to delegate its voting power to a voter. If this delegator already has a delegated voter, @@ -1395,6 +1415,7 @@ module aptos_framework::delegation_pool { pool_address: address, new_voter: address ) acquires DelegationPool, GovernanceRecords, BeneficiaryForOperator, NextCommissionPercentage { + check_stake_management_permission(delegator); assert_partial_governance_voting_enabled(pool_address); // synchronize delegation and stake pools before any user operation @@ -1439,19 +1460,20 @@ module aptos_framework::delegation_pool { delegator: delegator_address, voter: new_voter, }) + } else { + event::emit_event(&mut governance_records.delegate_voting_power_events, DelegateVotingPowerEvent { + pool_address, + delegator: delegator_address, + voter: new_voter, + }); }; - - event::emit_event(&mut governance_records.delegate_voting_power_events, DelegateVotingPowerEvent { - pool_address, - delegator: delegator_address, - voter: new_voter, - }); } /// Enable delegators allowlisting as the pool owner. 
public entry fun enable_delegators_allowlisting( owner: &signer, ) acquires DelegationPoolOwnership, DelegationPool { + check_delegation_pool_management_permission(owner); assert!( features::delegation_pool_allowlisting_enabled(), error::invalid_state(EDELEGATORS_ALLOWLISTING_NOT_SUPPORTED) @@ -1470,6 +1492,7 @@ module aptos_framework::delegation_pool { public entry fun disable_delegators_allowlisting( owner: &signer, ) acquires DelegationPoolOwnership, DelegationPoolAllowlisting { + check_delegation_pool_management_permission(owner); let pool_address = get_owned_pool_address(signer::address_of(owner)); assert_allowlisting_enabled(pool_address); @@ -1485,6 +1508,7 @@ module aptos_framework::delegation_pool { owner: &signer, delegator_address: address, ) acquires DelegationPoolOwnership, DelegationPoolAllowlisting { + check_delegation_pool_management_permission(owner); let pool_address = get_owned_pool_address(signer::address_of(owner)); assert_allowlisting_enabled(pool_address); @@ -1500,6 +1524,7 @@ module aptos_framework::delegation_pool { owner: &signer, delegator_address: address, ) acquires DelegationPoolOwnership, DelegationPoolAllowlisting { + check_delegation_pool_management_permission(owner); let pool_address = get_owned_pool_address(signer::address_of(owner)); assert_allowlisting_enabled(pool_address); @@ -1515,6 +1540,7 @@ module aptos_framework::delegation_pool { owner: &signer, delegator_address: address, ) acquires DelegationPoolOwnership, DelegationPool, GovernanceRecords, BeneficiaryForOperator, NextCommissionPercentage, DelegationPoolAllowlisting { + check_delegation_pool_management_permission(owner); let pool_address = get_owned_pool_address(signer::address_of(owner)); assert_allowlisting_enabled(pool_address); assert!( @@ -1539,6 +1565,7 @@ module aptos_framework::delegation_pool { pool_address: address, amount: u64 ) acquires DelegationPool, GovernanceRecords, BeneficiaryForOperator, NextCommissionPercentage, DelegationPoolAllowlisting { + check_stake_management_permission(delegator); // short-circuit if amount to add is 0 so no event is emitted if (amount == 0) { return }; @@ -1576,17 +1603,17 @@ module aptos_framework::delegation_pool { add_stake_fee, }, ); + } else { + event::emit_event( + &mut pool.add_stake_events, + AddStakeEvent { + pool_address, + delegator_address, + amount_added: amount, + add_stake_fee, + }, + ); }; - - event::emit_event( - &mut pool.add_stake_events, - AddStakeEvent { - pool_address, - delegator_address, - amount_added: amount, - add_stake_fee, - }, - ); } /// Unlock `amount` from the active + pending_active stake of `delegator` or @@ -1596,6 +1623,7 @@ module aptos_framework::delegation_pool { pool_address: address, amount: u64 ) acquires DelegationPool, GovernanceRecords, BeneficiaryForOperator, NextCommissionPercentage { + check_stake_management_permission(delegator); // short-circuit if amount to unlock is 0 so no event is emitted if (amount == 0) { return }; @@ -1639,16 +1667,16 @@ module aptos_framework::delegation_pool { amount_unlocked: amount, }, ); + } else { + event::emit_event( + &mut pool.unlock_stake_events, + UnlockStakeEvent { + pool_address, + delegator_address, + amount_unlocked: amount, + }, + ); }; - - event::emit_event( - &mut pool.unlock_stake_events, - UnlockStakeEvent { - pool_address, - delegator_address, - amount_unlocked: amount, - }, - ); } /// Move `amount` of coins from pending_inactive to active. 
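The event hunks above also change how events fire: the legacy EventHandle emission moves into an else branch, so each operation now emits either the new module event or the old handle event, never both. The gating condition sits in unchanged context lines that this hunk does not show; the fragment below assumes it is a features::module_event_migration_enabled() check and uses the UnlockStake / UnlockStakeEvent pair, following the AddStake naming visible earlier in the file.

if (std::features::module_event_migration_enabled()) {  // assumed gate, not visible in this hunk
    // New-style module event.
    event::emit(UnlockStake { pool_address, delegator_address, amount_unlocked: amount });
} else {
    // Legacy handle-based event, kept for consumers still reading EventHandles.
    event::emit_event(
        &mut pool.unlock_stake_events,
        UnlockStakeEvent { pool_address, delegator_address, amount_unlocked: amount },
    );
};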
@@ -1657,6 +1685,7 @@ module aptos_framework::delegation_pool { pool_address: address, amount: u64 ) acquires DelegationPool, GovernanceRecords, BeneficiaryForOperator, NextCommissionPercentage, DelegationPoolAllowlisting { + check_stake_management_permission(delegator); // short-circuit if amount to reactivate is 0 so no event is emitted if (amount == 0) { return }; @@ -1689,16 +1718,16 @@ module aptos_framework::delegation_pool { amount_reactivated: amount, }, ); + } else { + event::emit_event( + &mut pool.reactivate_stake_events, + ReactivateStakeEvent { + pool_address, + delegator_address, + amount_reactivated: amount, + }, + ); }; - - event::emit_event( - &mut pool.reactivate_stake_events, - ReactivateStakeEvent { - pool_address, - delegator_address, - amount_reactivated: amount, - }, - ); } /// Withdraw `amount` of owned inactive stake from the delegation pool at `pool_address`. @@ -1707,6 +1736,7 @@ module aptos_framework::delegation_pool { pool_address: address, amount: u64 ) acquires DelegationPool, GovernanceRecords, BeneficiaryForOperator, NextCommissionPercentage { + check_stake_management_permission(delegator); assert!(amount > 0, error::invalid_argument(EWITHDRAW_ZERO_STAKE)); // synchronize delegation and stake pools before any user operation synchronize_delegation_pool(pool_address); @@ -1774,16 +1804,16 @@ module aptos_framework::delegation_pool { amount_withdrawn: amount, }, ); + } else { + event::emit_event( + &mut pool.withdraw_stake_events, + WithdrawStakeEvent { + pool_address, + delegator_address, + amount_withdrawn: amount, + }, + ); }; - - event::emit_event( - &mut pool.withdraw_stake_events, - WithdrawStakeEvent { - pool_address, - delegator_address, - amount_withdrawn: amount, - }, - ); } /// Return the unique observed lockup cycle where delegator `delegator_address` may have @@ -2074,15 +2104,13 @@ module aptos_framework::delegation_pool { }, ); - if (features::operator_beneficiary_change_enabled()) { - emit(DistributeCommission { - pool_address, - operator: stake::get_operator(pool_address), - beneficiary: beneficiary_for_operator(stake::get_operator(pool_address)), - commission_active, - commission_pending_inactive, - }) - }; + emit(DistributeCommission { + pool_address, + operator: stake::get_operator(pool_address), + beneficiary: beneficiary_for_operator(stake::get_operator(pool_address)), + commission_active, + commission_pending_inactive, + }); // advance lockup cycle on delegation pool if already ended on stake pool (AND stake explicitly inactivated) if (lockup_cycle_ended) { @@ -2312,7 +2340,7 @@ module aptos_framework::delegation_pool { reconfiguration::initialize_for_test(aptos_framework); features::change_feature_flags_for_testing( aptos_framework, - vector[DELEGATION_POOLS, MODULE_EVENT, OPERATOR_BENEFICIARY_CHANGE, COMMISSION_CHANGE_DELEGATION_POOL], + vector[MODULE_EVENT], vector[] ); } @@ -2385,36 +2413,20 @@ module aptos_framework::delegation_pool { } #[test(aptos_framework = @aptos_framework, validator = @0x123)] - #[expected_failure(abort_code = 0x3000A, location = Self)] - public entry fun test_delegation_pools_disabled( - aptos_framework: &signer, - validator: &signer, - ) acquires DelegationPool, GovernanceRecords, BeneficiaryForOperator, NextCommissionPercentage { - initialize_for_test(aptos_framework); - features::change_feature_flags_for_testing(aptos_framework, vector[], vector[DELEGATION_POOLS]); - - initialize_delegation_pool(validator, 0, vector::empty()); - } - - #[test(aptos_framework = @aptos_framework, validator = @0x123)] - 
public entry fun test_set_operator_and_delegated_voter( + public entry fun test_set_operator( aptos_framework: &signer, validator: &signer, ) acquires DelegationPoolOwnership, DelegationPool, GovernanceRecords, BeneficiaryForOperator, NextCommissionPercentage { initialize_for_test(aptos_framework); let validator_address = signer::address_of(validator); - initialize_delegation_pool(validator, 0, vector::empty()); + initialize_delegation_pool(validator, 0, vector[]); let pool_address = get_owned_pool_address(validator_address); assert!(stake::get_operator(pool_address) == @0x123, 1); - assert!(stake::get_delegated_voter(pool_address) == @0x123, 1); set_operator(validator, @0x111); assert!(stake::get_operator(pool_address) == @0x111, 2); - - set_delegated_voter(validator, @0x112); - assert!(stake::get_delegated_voter(pool_address) == @0x112, 2); } #[test(aptos_framework = @aptos_framework, validator = @0x123)] @@ -2428,17 +2440,6 @@ module aptos_framework::delegation_pool { set_operator(validator, @0x111); } - #[test(aptos_framework = @aptos_framework, validator = @0x123)] - #[expected_failure(abort_code = 0x60001, location = Self)] - public entry fun test_cannot_set_delegated_voter( - aptos_framework: &signer, - validator: &signer, - ) acquires DelegationPoolOwnership, DelegationPool, GovernanceRecords, BeneficiaryForOperator, NextCommissionPercentage { - initialize_for_test(aptos_framework); - // account does not own any delegation pool - set_delegated_voter(validator, @0x112); - } - #[test(aptos_framework = @aptos_framework, validator = @0x123)] #[expected_failure(abort_code = 0x80002, location = Self)] public entry fun test_already_owns_delegation_pool( @@ -2477,7 +2478,6 @@ module aptos_framework::delegation_pool { assert!(stake::stake_pool_exists(pool_address), 0); assert!(stake::get_operator(pool_address) == validator_address, 0); - assert!(stake::get_delegated_voter(pool_address) == validator_address, 0); assert!(observed_lockup_cycle(pool_address) == 0, 0); assert!(total_coins_inactive(pool_address) == 0, 0); @@ -4175,12 +4175,6 @@ module aptos_framework::delegation_pool { 100 * ONE_APT, 1000, ); - aptos_governance::initialize_partial_voting(aptos_framework); - features::change_feature_flags_for_testing( - aptos_framework, - vector[features::get_partial_governance_voting(), features::get_delegation_pool_partial_governance_voting( - )], - vector[]); initialize_test_validator(validator, 100 * ONE_APT, true, false); let validator_address = signer::address_of(validator); @@ -4221,12 +4215,6 @@ module aptos_framework::delegation_pool { 100 * ONE_APT, 1000, ); - aptos_governance::initialize_partial_voting(aptos_framework); - features::change_feature_flags_for_testing( - aptos_framework, - vector[features::get_partial_governance_voting(), features::get_delegation_pool_partial_governance_voting( - )], - vector[]); initialize_test_validator(validator, 100 * ONE_APT, true, false); let validator_address = signer::address_of(validator); @@ -4277,14 +4265,6 @@ module aptos_framework::delegation_pool { 100 * ONE_APT, 1000, ); - aptos_governance::initialize_partial_voting(aptos_framework); - features::change_feature_flags_for_testing( - aptos_framework, - vector[features::get_partial_governance_voting(), features::get_delegation_pool_partial_governance_voting( - )], - vector[] - ); - initialize_test_validator(validator, 100 * ONE_APT, true, false); let validator_address = signer::address_of(validator); @@ -4414,70 +4394,6 @@ module aptos_framework::delegation_pool { 
assert!(calculate_and_update_voter_total_voting_power(pool_address, delegator2_address) == 0, 1); } - #[test(aptos_framework = @aptos_framework, validator = @0x123, delegator1 = @0x010, voter1 = @0x030)] - public entry fun test_voting_power_change_for_existing_delegation_pool( - aptos_framework: &signer, - validator: &signer, - delegator1: &signer, - voter1: &signer, - ) acquires DelegationPoolOwnership, DelegationPool, GovernanceRecords, BeneficiaryForOperator, NextCommissionPercentage, DelegationPoolAllowlisting { - initialize_for_test_no_reward(aptos_framework); - aptos_governance::initialize_for_test( - aptos_framework, - (10 * ONE_APT as u128), - 100 * ONE_APT, - 1000, - ); - aptos_governance::initialize_partial_voting(aptos_framework); - - initialize_test_validator(validator, 100 * ONE_APT, true, false); - - let validator_address = signer::address_of(validator); - let pool_address = get_owned_pool_address(validator_address); - // Delegation pool is created before partial governance voting feature flag is enabled. So this delegation - // pool's voter is its owner. - assert!(stake::get_delegated_voter(pool_address) == validator_address, 1); - assert!(!partial_governance_voting_enabled(pool_address), 1); - - let delegator1_address = signer::address_of(delegator1); - account::create_account_for_test(delegator1_address); - let voter1_address = signer::address_of(voter1); - account::create_account_for_test(voter1_address); - - stake::mint(delegator1, 110 * ONE_APT); - add_stake(delegator1, pool_address, 10 * ONE_APT); - - // Enable partial governance voting feature flag. - features::change_feature_flags_for_testing( - aptos_framework, - vector[features::get_partial_governance_voting(), features::get_delegation_pool_partial_governance_voting( - )], - vector[] - ); - // Voter doens't change until enabling partial governance voting on this delegation pool. - assert!(stake::get_delegated_voter(pool_address) == validator_address, 1); - // Enable partial governance voting on this delegation pool. - enable_partial_governance_voting(pool_address); - assert!(stake::get_delegated_voter(pool_address) == pool_address, 1); - assert!(partial_governance_voting_enabled(pool_address), 1); - - // By default, the voter of a delegator is itself. - assert!(calculate_and_update_voter_total_voting_power(pool_address, voter1_address) == 0, 1); - assert!(calculate_and_update_voter_total_voting_power(pool_address, delegator1_address) == 10 * ONE_APT, 1); - - // Delegator1 delegates its voting power to voter1. - // It takes 1 cycle to take effect. No immediate change. - delegate_voting_power(delegator1, pool_address, voter1_address); - assert!(calculate_and_update_voter_total_voting_power(pool_address, voter1_address) == 0, 1); - assert!(calculate_and_update_voter_total_voting_power(pool_address, delegator1_address) == 10 * ONE_APT, 1); - - // One cycle passed. The voter change takes effects. 
- timestamp::fast_forward_seconds(LOCKUP_CYCLE_SECONDS); - end_aptos_epoch(); - assert!(calculate_and_update_voter_total_voting_power(pool_address, voter1_address) == 10 * ONE_APT, 1); - assert!(calculate_and_update_voter_total_voting_power(pool_address, delegator1_address) == 0, 1); - } - #[test( aptos_framework = @aptos_framework, validator = @0x123, @@ -4510,13 +4426,6 @@ module aptos_framework::delegation_pool { 100 * ONE_APT, 1000, ); - aptos_governance::initialize_partial_voting(aptos_framework); - features::change_feature_flags_for_testing( - aptos_framework, - vector[features::get_partial_governance_voting(), features::get_delegation_pool_partial_governance_voting( - )], - vector[] - ); // 50% commission rate initialize_test_validator_custom(validator, 100 * ONE_APT, true, false, 5000); @@ -4576,222 +4485,6 @@ module aptos_framework::delegation_pool { assert!(calculate_and_update_voter_total_voting_power(pool_address, delegator2_address) == 0, 1); } - #[test( - aptos_framework = @aptos_framework, - validator = @0x123, - delegator1 = @0x010, - delegator2 = @0x020, - voter1 = @0x030, - voter2 = @0x040 - )] - public entry fun test_voting_power_change_already_voted_before_partial( - aptos_framework: &signer, - validator: &signer, - delegator1: &signer, - delegator2: &signer, - voter1: &signer, - voter2: &signer, - ) acquires DelegationPoolOwnership, DelegationPool, GovernanceRecords, BeneficiaryForOperator, NextCommissionPercentage, DelegationPoolAllowlisting { - // partial voing hasn't been enabled yet. A proposal has been created by the validator. - let proposal1_id = setup_vote(aptos_framework, validator, false); - - let validator_address = signer::address_of(validator); - let pool_address = get_owned_pool_address(validator_address); - - let delegator1_address = signer::address_of(delegator1); - account::create_account_for_test(delegator1_address); - let delegator2_address = signer::address_of(delegator2); - account::create_account_for_test(delegator2_address); - let voter1_address = signer::address_of(voter1); - account::create_account_for_test(voter1_address); - let voter2_address = signer::address_of(voter2); - account::create_account_for_test(voter2_address); - - stake::mint(delegator1, 110 * ONE_APT); - add_stake(delegator1, pool_address, 10 * ONE_APT); - stake::mint(delegator2, 110 * ONE_APT); - add_stake(delegator2, pool_address, 90 * ONE_APT); - - // Create 2 proposals and vote for proposal1. - let execution_hash = vector::empty(); - vector::push_back(&mut execution_hash, 1); - let proposal2_id = aptos_governance::create_proposal_v2_impl( - validator, - pool_address, - execution_hash, - b"", - b"", - true, - ); - aptos_governance::vote(validator, pool_address, proposal1_id, true); - - // Enable partial governance voting feature flag. - features::change_feature_flags_for_testing( - aptos_framework, - vector[features::get_partial_governance_voting(), features::get_delegation_pool_partial_governance_voting( - )], - vector[] - ); - // Voter doens't change until enabling partial governance voting on this delegation pool. - assert!(stake::get_delegated_voter(pool_address) == validator_address, 1); - // Enable partial governance voting on this delegation pool. 
- enable_partial_governance_voting(pool_address); - assert!(stake::get_delegated_voter(pool_address) == pool_address, 1); - assert!(partial_governance_voting_enabled(pool_address), 1); - - assert!(calculate_and_update_voter_total_voting_power(pool_address, validator_address) == 100 * ONE_APT, 1); - assert!(calculate_and_update_voter_total_voting_power(pool_address, delegator1_address) == 10 * ONE_APT, 1); - assert!(calculate_and_update_voter_total_voting_power(pool_address, delegator2_address) == 90 * ONE_APT, 1); - // No one can vote for proposal1 because it's already voted before enabling partial governance voting. - assert!(calculate_and_update_remaining_voting_power(pool_address, validator_address, proposal1_id) == 0, 1); - assert!(calculate_and_update_remaining_voting_power(pool_address, delegator1_address, proposal1_id) == 0, 1); - assert!(calculate_and_update_remaining_voting_power(pool_address, delegator2_address, proposal1_id) == 0, 1); - assert!( - calculate_and_update_remaining_voting_power(pool_address, validator_address, proposal2_id) == 100 * ONE_APT, - 1 - ); - assert!( - calculate_and_update_remaining_voting_power(pool_address, delegator1_address, proposal2_id) == 10 * ONE_APT, - 1 - ); - assert!( - calculate_and_update_remaining_voting_power(pool_address, delegator2_address, proposal2_id) == 90 * ONE_APT, - 1 - ); - - // Delegator1 tries to use 50 APT to vote on proposal2, but it only has 10 APT. So only 10 APT voting power is used. - vote(delegator1, pool_address, proposal2_id, 50 * ONE_APT, true); - assert!(calculate_and_update_remaining_voting_power(pool_address, delegator1_address, proposal2_id) == 0, 1); - - add_stake(delegator1, pool_address, 60 * ONE_APT); - assert!(calculate_and_update_voter_total_voting_power(pool_address, delegator1_address) == 70 * ONE_APT, 1); - vote(delegator1, pool_address, proposal2_id, 25 * ONE_APT, true); - assert!( - calculate_and_update_remaining_voting_power(pool_address, delegator1_address, proposal2_id) == 35 * ONE_APT, - 1 - ); - vote(delegator1, pool_address, proposal2_id, 30 * ONE_APT, false); - assert!( - calculate_and_update_remaining_voting_power(pool_address, delegator1_address, proposal2_id) == 5 * ONE_APT, - 1 - ); - } - - #[test(aptos_framework = @aptos_framework, validator = @0x123, delegator1 = @0x010, voter1 = @0x030)] - #[expected_failure(abort_code = 0x10010, location = Self)] - public entry fun test_vote_should_failed_if_already_voted_before_enable_partial_voting_flag( - aptos_framework: &signer, - validator: &signer, - delegator1: &signer, - voter1: &signer, - ) acquires DelegationPoolOwnership, DelegationPool, GovernanceRecords, BeneficiaryForOperator, NextCommissionPercentage, DelegationPoolAllowlisting { - // partial voing hasn't been enabled yet. A proposal has been created by the validator. - let proposal1_id = setup_vote(aptos_framework, validator, false); - - let validator_address = signer::address_of(validator); - let pool_address = get_owned_pool_address(validator_address); - let delegator1_address = signer::address_of(delegator1); - account::create_account_for_test(delegator1_address); - let voter1_address = signer::address_of(voter1); - account::create_account_for_test(voter1_address); - - stake::mint(delegator1, 110 * ONE_APT); - add_stake(delegator1, pool_address, 10 * ONE_APT); - end_aptos_epoch(); - - aptos_governance::vote(validator, pool_address, proposal1_id, true); - - // Enable partial governance voting feature flag. 
- features::change_feature_flags_for_testing( - aptos_framework, - vector[features::get_partial_governance_voting(), features::get_delegation_pool_partial_governance_voting( - )], - vector[] - ); - // Enable partial governance voting on this delegation pool. - enable_partial_governance_voting(pool_address); - - vote(delegator1, pool_address, proposal1_id, 10 * ONE_APT, true); - } - - #[test(aptos_framework = @aptos_framework, validator = @0x123, delegator1 = @0x010, voter1 = @0x030)] - #[expected_failure(abort_code = 0x10011, location = Self)] - public entry fun test_vote_should_failed_if_already_voted_before_enable_partial_voting_on_pool( - aptos_framework: &signer, - validator: &signer, - delegator1: &signer, - voter1: &signer, - ) acquires DelegationPoolOwnership, DelegationPool, GovernanceRecords, BeneficiaryForOperator, NextCommissionPercentage, DelegationPoolAllowlisting { - // partial voing hasn't been enabled yet. A proposal has been created by the validator. - let proposal1_id = setup_vote(aptos_framework, validator, false); - - let validator_address = signer::address_of(validator); - let pool_address = get_owned_pool_address(validator_address); - let delegator1_address = signer::address_of(delegator1); - account::create_account_for_test(delegator1_address); - let voter1_address = signer::address_of(voter1); - account::create_account_for_test(voter1_address); - - stake::mint(delegator1, 110 * ONE_APT); - add_stake(delegator1, pool_address, 10 * ONE_APT); - end_aptos_epoch(); - - // Enable partial governance voting feature flag. - features::change_feature_flags_for_testing( - aptos_framework, - vector[features::get_partial_governance_voting(), features::get_delegation_pool_partial_governance_voting( - )], - vector[] - ); - - // The operator voter votes on the proposal after partial governace voting flag is enabled but before partial voting is enabled on the pool. - aptos_governance::vote(validator, pool_address, proposal1_id, true); - - // Enable partial governance voting on this delegation pool. - enable_partial_governance_voting(pool_address); - - add_stake(delegator1, pool_address, 10 * ONE_APT); - vote(delegator1, pool_address, proposal1_id, 10 * ONE_APT, true); - } - - #[test(aptos_framework = @aptos_framework, validator = @0x123, delegator1 = @0x010)] - #[expected_failure(abort_code = 0x10010, location = Self)] - public entry fun test_vote_should_failed_if_no_stake( - aptos_framework: &signer, - validator: &signer, - delegator1: &signer, - ) acquires DelegationPoolOwnership, DelegationPool, GovernanceRecords, BeneficiaryForOperator, NextCommissionPercentage, DelegationPoolAllowlisting { - // partial voing hasn't been enabled yet. A proposal has been created by the validator. - let proposal1_id = setup_vote(aptos_framework, validator, true); - - let validator_address = signer::address_of(validator); - let pool_address = get_owned_pool_address(validator_address); - let delegator1_address = signer::address_of(delegator1); - account::create_account_for_test(delegator1_address); - - // Delegator1 has no stake. Abort. 
- vote(delegator1, pool_address, proposal1_id, 10 * ONE_APT, true); - } - - #[test(aptos_framework = @aptos_framework, validator = @0x123, delegator1 = @0x010, voter1 = @0x030)] - public entry fun test_delegate_voting_power_should_pass_even_if_no_stake( - aptos_framework: &signer, - validator: &signer, - delegator1: &signer, - voter1: &signer, - ) acquires DelegationPoolOwnership, DelegationPool, GovernanceRecords, BeneficiaryForOperator, NextCommissionPercentage, DelegationPoolAllowlisting { - // partial voing hasn't been enabled yet. A proposal has been created by the validator. - setup_vote(aptos_framework, validator, true); - - let validator_address = signer::address_of(validator); - let pool_address = get_owned_pool_address(validator_address); - let delegator1_address = signer::address_of(delegator1); - account::create_account_for_test(delegator1_address); - - // Delegator1 has no stake. Abort. - delegate_voting_power(delegator1, pool_address, signer::address_of(voter1)); - } - #[test( aptos_framework = @aptos_framework, validator = @0x123, @@ -4807,15 +4500,6 @@ module aptos_framework::delegation_pool { voter2: &signer, ) acquires DelegationPoolOwnership, DelegationPool, GovernanceRecords, BeneficiaryForOperator, NextCommissionPercentage, DelegationPoolAllowlisting { initialize_for_test(aptos_framework); - aptos_governance::initialize_partial_voting(aptos_framework); - features::change_feature_flags_for_testing( - aptos_framework, - vector[ - features::get_partial_governance_voting(), - features::get_delegation_pool_partial_governance_voting() - ], - vector[] - ); initialize_test_validator(validator, 100 * ONE_APT, true, true); let validator_address = signer::address_of(validator); @@ -4955,15 +4639,6 @@ module aptos_framework::delegation_pool { voter2: &signer, ) acquires DelegationPoolOwnership, DelegationPool, GovernanceRecords, BeneficiaryForOperator, NextCommissionPercentage, DelegationPoolAllowlisting { initialize_for_test(aptos_framework); - aptos_governance::initialize_partial_voting(aptos_framework); - features::change_feature_flags_for_testing( - aptos_framework, - vector[ - features::get_partial_governance_voting(), - features::get_delegation_pool_partial_governance_voting() - ], - vector[] - ); // activate more validators in order to inactivate one later initialize_test_validator(validator, 100 * ONE_APT, true, false); @@ -5514,53 +5189,6 @@ module aptos_framework::delegation_pool { } } - #[test_only] - public fun setup_vote( - aptos_framework: &signer, - validator: &signer, - enable_partial_voting: bool, - ): u64 acquires DelegationPoolOwnership, DelegationPool, GovernanceRecords, BeneficiaryForOperator, NextCommissionPercentage, DelegationPoolAllowlisting { - initialize_for_test_no_reward(aptos_framework); - aptos_governance::initialize_for_test( - aptos_framework, - (10 * ONE_APT as u128), - 100 * ONE_APT, - 1000, - ); - aptos_governance::initialize_partial_voting(aptos_framework); - - initialize_test_validator(validator, 100 * ONE_APT, true, false); - - let validator_address = signer::address_of(validator); - let pool_address = get_owned_pool_address(validator_address); - // Delegation pool is created before partial governance voting feature flag is enabled. So this delegation - // pool's voter is its owner. - assert!(stake::get_delegated_voter(pool_address) == validator_address, 1); - assert!(!partial_governance_voting_enabled(pool_address), 1); - end_aptos_epoch(); - - // Create 1 proposals and vote for proposal1. 
- let execution_hash = vector::empty(); - vector::push_back(&mut execution_hash, 1); - let proposal_id = aptos_governance::create_proposal_v2_impl( - validator, - pool_address, - execution_hash, - b"", - b"", - true, - ); - if (enable_partial_voting) { - features::change_feature_flags_for_testing( - aptos_framework, - vector[features::get_partial_governance_voting( - ), features::get_delegation_pool_partial_governance_voting()], - vector[]); - enable_partial_governance_voting(pool_address); - }; - proposal_id - } - #[test_only] public fun total_coins_inactive(pool_address: address): u64 acquires DelegationPool { borrow_global(pool_address).total_coins_inactive diff --git a/aptos-move/framework/aptos-framework/sources/dispatchable_fungible_asset.move b/aptos-move/framework/aptos-framework/sources/dispatchable_fungible_asset.move index aa843a38f7201..293b4e069efb5 100644 --- a/aptos-move/framework/aptos-framework/sources/dispatchable_fungible_asset.move +++ b/aptos-move/framework/aptos-framework/sources/dispatchable_fungible_asset.move @@ -58,6 +58,16 @@ module aptos_framework::dispatchable_fungible_asset { ); } + public fun register_derive_supply_dispatch_function( + constructor_ref: &ConstructorRef, + dispatch_function: Option + ) { + fungible_asset::register_derive_supply_dispatch_function( + constructor_ref, + dispatch_function + ); + } + /// Withdraw `amount` of the fungible asset from `store` by the owner. /// /// The semantics of deposit will be governed by the function specified in DispatchFunctionStore. @@ -67,13 +77,13 @@ module aptos_framework::dispatchable_fungible_asset { amount: u64, ): FungibleAsset acquires TransferRefStore { fungible_asset::withdraw_sanity_check(owner, store, false); + fungible_asset::withdraw_permission_check(owner, store, amount); let func_opt = fungible_asset::withdraw_dispatch_function(store); if (option::is_some(&func_opt)) { assert!( features::dispatchable_fungible_asset_enabled(), error::aborted(ENOT_ACTIVATED) ); - let start_balance = fungible_asset::balance(store); let func = option::borrow(&func_opt); function_info::load_module_from_function(func); let fa = dispatchable_withdraw( @@ -82,11 +92,9 @@ module aptos_framework::dispatchable_fungible_asset { borrow_transfer_ref(store), func, ); - let end_balance = fungible_asset::balance(store); - assert!(amount <= start_balance - end_balance, error::aborted(EAMOUNT_MISMATCH)); fa } else { - fungible_asset::withdraw_internal(object::object_address(&store), amount) + fungible_asset::unchecked_withdraw(object::object_address(&store), amount) } } @@ -110,7 +118,7 @@ module aptos_framework::dispatchable_fungible_asset { func ) } else { - fungible_asset::deposit_internal(object::object_address(&store), fa) + fungible_asset::unchecked_deposit(object::object_address(&store), fa) } } @@ -162,6 +170,44 @@ module aptos_framework::dispatchable_fungible_asset { } } + #[view] + /// Whether the derived value of store using the overloaded hook is at least `amount` + /// + /// The semantics of value will be governed by the function specified in DispatchFunctionStore. 
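The new derive-supply hook is registered at asset creation time through register_derive_supply_dispatch_function (added above) and consulted by the derived_supply view added just below. A minimal registration sketch follows; the module address, module name, and derived_supply_impl hook are placeholders, and the hook itself (whose signature must match what the dispatcher expects) is not shown.

module 0xcafe::staked_fa {
    use std::option;
    use std::string;
    use aptos_framework::dispatchable_fungible_asset;
    use aptos_framework::function_info;
    use aptos_framework::object::ConstructorRef;

    public fun register_supply_hook(constructor_ref: &ConstructorRef) {
        // Point the dispatcher at this module's (hypothetical) derived_supply_impl function.
        let hook = function_info::new_function_info_from_address(
            @0xcafe,
            string::utf8(b"staked_fa"),
            string::utf8(b"derived_supply_impl"),
        );
        dispatchable_fungible_asset::register_derive_supply_dispatch_function(
            constructor_ref,
            option::some(hook),
        );
    }
}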
+ public fun is_derived_balance_at_least(store: Object, amount: u64): bool { + let func_opt = fungible_asset::derived_balance_dispatch_function(store); + if (option::is_some(&func_opt)) { + assert!( + features::dispatchable_fungible_asset_enabled(), + error::aborted(ENOT_ACTIVATED) + ); + let func = option::borrow(&func_opt); + function_info::load_module_from_function(func); + dispatchable_derived_balance(store, func) >= amount + } else { + fungible_asset::is_balance_at_least(store, amount) + } + } + + #[view] + /// Get the derived supply of the fungible asset using the overloaded hook. + /// + /// The semantics of supply will be governed by the function specified in DeriveSupplyDispatch. + public fun derived_supply(metadata: Object): Option { + let func_opt = fungible_asset::derived_supply_dispatch_function(metadata); + if (option::is_some(&func_opt)) { + assert!( + features::dispatchable_fungible_asset_enabled(), + error::aborted(ENOT_ACTIVATED) + ); + let func = option::borrow(&func_opt); + function_info::load_module_from_function(func); + dispatchable_derived_supply(metadata, func) + } else { + fungible_asset::supply(metadata) + } + } + inline fun borrow_transfer_ref(metadata: Object): &TransferRef acquires TransferRefStore { let metadata_addr = object::object_address( &fungible_asset::store_metadata(metadata) @@ -191,4 +237,9 @@ module aptos_framework::dispatchable_fungible_asset { store: Object, function: &FunctionInfo, ): u64; + + native fun dispatchable_derived_supply( + metadata: Object, + function: &FunctionInfo, + ): Option; } diff --git a/aptos-move/framework/aptos-framework/sources/dispatchable_fungible_asset.spec.move b/aptos-move/framework/aptos-framework/sources/dispatchable_fungible_asset.spec.move index 5932673f6041c..744faced798ac 100644 --- a/aptos-move/framework/aptos-framework/sources/dispatchable_fungible_asset.spec.move +++ b/aptos-move/framework/aptos-framework/sources/dispatchable_fungible_asset.spec.move @@ -14,4 +14,8 @@ spec aptos_framework::dispatchable_fungible_asset { spec dispatchable_derived_balance{ pragma opaque; } + + spec dispatchable_derived_supply{ + pragma opaque; + } } diff --git a/aptos-move/framework/aptos-framework/sources/ethereum.move b/aptos-move/framework/aptos-framework/sources/ethereum.move deleted file mode 100644 index 0c883393c8729..0000000000000 --- a/aptos-move/framework/aptos-framework/sources/ethereum.move +++ /dev/null @@ -1,193 +0,0 @@ -module aptos_framework::ethereum { - use std::vector; - use aptos_std::aptos_hash::keccak256; - - /// Constants for ASCII character codes - const ASCII_A: u8 = 0x41; - const ASCII_Z: u8 = 0x5A; - const ASCII_A_LOWERCASE: u8 = 0x61; - const ASCII_F_LOWERCASE: u8 = 0x66; - - // Error codes - - const EINVALID_LENGTH: u64 = 1; - - /// Represents an Ethereum address within Aptos smart contracts. - /// Provides structured handling, storage, and validation of Ethereum addresses. - struct EthereumAddress has store, copy, drop { - inner: vector, - } - - /// Validates an Ethereum address against EIP-55 checksum rules and returns a new `EthereumAddress`. - /// - /// @param ethereum_address A 40-byte vector of unsigned 8-bit integers (hexadecimal format). - /// @return A validated `EthereumAddress` struct. - /// @abort If the address does not conform to EIP-55 standards. - public fun ethereum_address(ethereum_address: vector): EthereumAddress { - assert_eip55(ðereum_address); - EthereumAddress { inner: ethereum_address } - } - - /// Returns a new `EthereumAddress` without EIP-55 validation. 
- /// - /// @param ethereum_address A 40-byte vector of unsigned 8-bit integers (hexadecimal format). - /// @return A validated `EthereumAddress` struct. - /// @abort If the address does not conform to EIP-55 standards. - public fun ethereum_address_no_eip55(ethereum_address: vector): EthereumAddress { - assert_40_char_hex(ðereum_address); - EthereumAddress { inner: ethereum_address } - } - - /// Returns a new 20-byte `EthereumAddress` without EIP-55 validation. - /// - /// @param ethereum_address A 20-byte vector of unsigned 8-bit bytes. - /// @return An `EthereumAddress` struct. - /// @abort If the address does not conform to EIP-55 standards. - public fun ethereum_address_20_bytes(ethereum_address: vector): EthereumAddress { - assert!(vector::length(ðereum_address) == 20, EINVALID_LENGTH); - EthereumAddress { inner: ethereum_address } - } - - /// Gets the inner vector of an `EthereumAddress`. - /// - /// @param ethereum_address A 40-byte vector of unsigned 8-bit integers (hexadecimal format). - /// @return The vector inner value of the EthereumAddress - public fun get_inner_ethereum_address(ethereum_address: EthereumAddress): vector { - ethereum_address.inner - } - - /// Converts uppercase ASCII characters in a vector to their lowercase equivalents. - /// - /// @param input A reference to a vector of ASCII characters. - /// @return A new vector with lowercase equivalents of the input characters. - /// @note Only affects ASCII letters; non-alphabetic characters are unchanged. - public fun to_lowercase(input: &vector): vector { - let lowercase_bytes = vector::empty(); - vector::enumerate_ref(input, |_i, element| { - let lower_byte = if (*element >= ASCII_A && *element <= ASCII_Z) { - *element + 32 - } else { - *element - }; - vector::push_back(&mut lowercase_bytes, lower_byte); - }); - lowercase_bytes - } - - #[test] - fun test_to_lowercase() { - let upper = b"TeST"; - let lower = b"test"; - assert!(to_lowercase(&upper) == lower, 0); - } - - /// Converts an Ethereum address to EIP-55 checksummed format. - /// - /// @param ethereum_address A 40-character vector representing the Ethereum address in hexadecimal format. - /// @return The EIP-55 checksummed version of the input address. - /// @abort If the input address does not have exactly 40 characters. - /// @note Assumes input address is valid and in lowercase hexadecimal format. - public fun to_eip55_checksumed_address(ethereum_address: &vector): vector { - assert!(vector::length(ethereum_address) == 40, 0); - let lowercase = to_lowercase(ethereum_address); - let hash = keccak256(lowercase); - let output = vector::empty(); - - for (index in 0..40) { - let item = *vector::borrow(ethereum_address, index); - if (item >= ASCII_A_LOWERCASE && item <= ASCII_F_LOWERCASE) { - let hash_item = *vector::borrow(&hash, index / 2); - if ((hash_item >> ((4 * (1 - (index % 2))) as u8)) & 0xF >= 8) { - vector::push_back(&mut output, item - 32); - } else { - vector::push_back(&mut output, item); - } - } else { - vector::push_back(&mut output, item); - } - }; - output - } - - public fun get_inner(eth_address: &EthereumAddress): vector { - eth_address.inner - } - - /// Checks if an Ethereum address conforms to the EIP-55 checksum standard. - /// - /// @param ethereum_address A reference to a 40-character vector of an Ethereum address in hexadecimal format. - /// @abort If the address does not match its EIP-55 checksummed version. - /// @note Assumes the address is correctly formatted as a 40-character hexadecimal string. 
- public fun assert_eip55(ethereum_address: &vector) { - let eip55 = to_eip55_checksumed_address(ethereum_address); - let len = vector::length(&eip55); - for (index in 0..len) { - assert!(vector::borrow(&eip55, index) == vector::borrow(ethereum_address, index), 0); - }; - } - - /// Checks if an Ethereum address is a nonzero 40-character hexadecimal string. - /// - /// @param ethereum_address A reference to a vector of bytes representing the Ethereum address as characters. - /// @abort If the address is not 40 characters long, contains invalid characters, or is all zeros. - public fun assert_40_char_hex(ethereum_address: &vector) { - let len = vector::length(ethereum_address); - - // Ensure the address is exactly 40 characters long - assert!(len == 40, 1); - - // Ensure the address contains only valid hexadecimal characters - let is_zero = true; - for (index in 0..len) { - let char = *vector::borrow(ethereum_address, index); - - // Check if the character is a valid hexadecimal character (0-9, a-f, A-F) - assert!( - (char >= 0x30 && char <= 0x39) || // '0' to '9' - (char >= 0x41 && char <= 0x46) || // 'A' to 'F' - (char >= 0x61 && char <= 0x66), // 'a' to 'f' - 2 - ); - - // Check if the address is nonzero - if (char != 0x30) { // '0' - is_zero = false; - }; - }; - - // Abort if the address is all zeros - assert!(!is_zero, 3); - } - - #[test_only] - public fun eth_address_20_bytes(): vector { - vector[0x32, 0xBe, 0x34, 0x3B, 0x94, 0xf8, 0x60, 0x12, 0x4d, 0xC4, 0xfE, 0xE2, 0x78, 0xFD, 0xCB, 0xD3, 0x8C, 0x10, 0x2D, 0x88] -} - - #[test_only] - public fun valid_eip55(): vector { - b"32Be343B94f860124dC4fEe278FDCBD38C102D88" - } - - #[test_only] - public fun invalid_eip55(): vector { - b"32be343b94f860124dc4fee278fdcbd38c102d88" - } - - #[test] - fun test_valid_eip55_checksum() { - assert_eip55(&valid_eip55()); - } - - #[test] - #[expected_failure(abort_code = 0, location = Self)] - fun test_invalid_eip55_checksum() { - assert_eip55(&invalid_eip55()); - } - - #[test] - #[expected_failure(abort_code = 0, location = Self)] - fun test_simple_invalid_eip55_checksum() { - assert_eip55(&b"0"); - } -} \ No newline at end of file diff --git a/aptos-move/framework/aptos-framework/sources/event.move b/aptos-move/framework/aptos-framework/sources/event.move index 542808163e88e..0f65378fe4de8 100644 --- a/aptos-move/framework/aptos-framework/sources/event.move +++ b/aptos-move/framework/aptos-framework/sources/event.move @@ -4,12 +4,15 @@ /// events emitted to a handle and emit events to the event store. module aptos_framework::event { use std::bcs; - use aptos_framework::guid::GUID; friend aptos_framework::account; friend aptos_framework::object; + /// An event cannot be created. This error is returned by native implementations when + /// - The type tag for event is too deeply nested. + const ECANNOT_CREATE_EVENT: u64 = 1; + /// Emit a module event with payload `msg`. 
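The new ECANNOT_CREATE_EVENT constant documents the abort the native emit path raises when an event's type tag is nested too deeply. For reference, defining and emitting a module event looks like the sketch below (module and struct names are illustrative):

module 0xcafe::event_example {
    use aptos_framework::event;

    #[event]
    struct StakeUnlocked has drop, store {
        pool: address,
        amount: u64,
    }

    public fun notify_unlocked(pool: address, amount: u64) {
        // A flat payload type like this never hits ECANNOT_CREATE_EVENT; only deeply
        // nested type tags can trigger it in the native layer.
        event::emit(StakeUnlocked { pool, amount });
    }
}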
public fun emit(msg: T) { write_module_event_to_store(msg); diff --git a/aptos-move/framework/aptos-framework/sources/function_info.move b/aptos-move/framework/aptos-framework/sources/function_info.move index c7f78c11d081c..6466c15d45a58 100644 --- a/aptos-move/framework/aptos-framework/sources/function_info.move +++ b/aptos-move/framework/aptos-framework/sources/function_info.move @@ -7,6 +7,7 @@ module aptos_framework::function_info { friend aptos_framework::fungible_asset; friend aptos_framework::dispatchable_fungible_asset; + friend aptos_framework::account_abstraction; /// String is not a valid Move identifier const EINVALID_IDENTIFIER: u64 = 1; @@ -35,7 +36,7 @@ module aptos_framework::function_info { ) } - public(friend) fun new_function_info_from_address( + public fun new_function_info_from_address( module_address: address, module_name: String, function_name: String, diff --git a/aptos-move/framework/aptos-framework/sources/function_info.spec.move b/aptos-move/framework/aptos-framework/sources/function_info.spec.move index 06beb9cc209c1..62f91e35f39a6 100644 --- a/aptos-move/framework/aptos-framework/sources/function_info.spec.move +++ b/aptos-move/framework/aptos-framework/sources/function_info.spec.move @@ -1,13 +1,67 @@ spec aptos_framework::function_info { spec module { - pragma verify = false; + fun spec_is_identifier(s: vector): bool; + } + + // native function + spec check_dispatch_type_compatibility_impl(lhs: &FunctionInfo, r: &FunctionInfo): bool { + // TODO: temporary mockup + pragma opaque; + } + + // native function + spec load_function_impl(f: &FunctionInfo){ + // TODO: temporary mockup + pragma opaque; + } + + // native function + spec is_identifier(s: &vector): bool { + pragma opaque; + aborts_if [abstract] false; + ensures [abstract] result == spec_is_identifier(s); + } + + spec new_function_info( + module_signer: &signer, + module_name: String, + function_name: String, + ): FunctionInfo { + aborts_if !spec_is_identifier(string::bytes(module_name)); + aborts_if !spec_is_identifier(string::bytes(function_name)); + ensures result == FunctionInfo { + module_address: signer::address_of(module_signer), + module_name, + function_name, + }; } - spec check_dispatch_type_compatibility_impl { + spec new_function_info_from_address( + module_address: address, + module_name: String, + function_name: String, + ): FunctionInfo { + aborts_if !spec_is_identifier(string::bytes(module_name)); + aborts_if !spec_is_identifier(string::bytes(function_name)); + ensures result == FunctionInfo { + module_address, + module_name, + function_name, + }; + } + + spec load_module_from_function(f: &FunctionInfo) { + // TODO: temporary mockup + pragma verify = false; pragma opaque; } - spec load_function_impl{ + spec check_dispatch_type_compatibility( + framework_function: &FunctionInfo, + dispatch_target: &FunctionInfo, + ): bool { + // TODO: temporary mockup + pragma verify = false; pragma opaque; } } diff --git a/aptos-move/framework/aptos-framework/sources/fungible_asset.move b/aptos-move/framework/aptos-framework/sources/fungible_asset.move index 194cf32d9e0ca..cba8e81071cdc 100644 --- a/aptos-move/framework/aptos-framework/sources/fungible_asset.move +++ b/aptos-move/framework/aptos-framework/sources/fungible_asset.move @@ -6,6 +6,7 @@ module aptos_framework::fungible_asset { use aptos_framework::event; use aptos_framework::function_info::{Self, FunctionInfo}; use aptos_framework::object::{Self, Object, ConstructorRef, DeleteRef, ExtendRef}; + use aptos_framework::permissioned_signer; use 
std::string; use std::features; @@ -19,7 +20,6 @@ module aptos_framework::fungible_asset { friend aptos_framework::aptos_account; friend aptos_framework::dispatchable_fungible_asset; - friend aptos_framework::governed_gas_pool; /// Amount cannot be zero. const EAMOUNT_CANNOT_BE_ZERO: u64 = 1; @@ -86,13 +86,21 @@ module aptos_framework::fungible_asset { const EAPT_NOT_DISPATCHABLE: u64 = 31; /// Flag for Concurrent Supply not enabled const ECONCURRENT_BALANCE_NOT_ENABLED: u64 = 32; - + /// Provided derived_supply function type doesn't meet the signature requirement. + const EDERIVED_SUPPLY_FUNCTION_SIGNATURE_MISMATCH: u64 = 33; + /// The balance ref and the fungible asset do not match. + const ERAW_BALANCE_REF_AND_FUNGIBLE_ASSET_MISMATCH: u64 = 34; + /// The supply ref and the fungible asset do not match. + const ERAW_SUPPLY_REF_AND_FUNGIBLE_ASSET_MISMATCH: u64 = 35; + + /// signer don't have the permission to perform withdraw operation + const EWITHDRAW_PERMISSION_DENIED: u64 = 36; // // Constants // const MAX_NAME_LENGTH: u64 = 32; - const MAX_SYMBOL_LENGTH: u64 = 10; + const MAX_SYMBOL_LENGTH: u64 = 32; const MAX_DECIMALS: u8 = 32; const MAX_URI_LENGTH: u64 = 512; @@ -153,6 +161,11 @@ module aptos_framework::fungible_asset { derived_balance_function: Option, } + #[resource_group_member(group = aptos_framework::object::ObjectGroup)] + struct DeriveSupply has key { + dispatch_function: Option + } + #[resource_group_member(group = aptos_framework::object::ObjectGroup)] /// The store object that holds concurrent fungible asset balance. struct ConcurrentFungibleBalance has key { @@ -178,6 +191,16 @@ module aptos_framework::fungible_asset { metadata: Object } + /// RawBalanceRef will be used to access the raw balance for FAs that registered `derived_balance` hook. + struct RawBalanceRef has drop, store { + metadata: Object + } + + /// RawSupplyRef will be used to access the raw supply for FAs that registered `derived_supply` hook. + struct RawSupplyRef has drop, store { + metadata: Object + } + /// BurnRef can be used to burn fungible assets from a given holder account. struct BurnRef has drop, store { metadata: Object @@ -188,6 +211,10 @@ module aptos_framework::fungible_asset { metadata: Object } + enum WithdrawPermission has copy, drop, store { + ByStore { store_address: address } + } + #[event] /// Emitted when fungible assets are deposited into a store. struct Deposit has drop, store { @@ -209,6 +236,14 @@ module aptos_framework::fungible_asset { frozen: bool, } + #[event] + /// Module event emitted when a fungible store is deleted. + struct FungibleStoreDeletion has drop, store { + store: address, + owner: address, + metadata: address, + } + inline fun default_to_concurrent_fungible_supply(): bool { features::concurrent_fungible_assets_enabled() } @@ -349,28 +384,13 @@ module aptos_framework::fungible_asset { ) ); }); - - // Cannot register hook for APT. 
- assert!( - object::address_from_constructor_ref(constructor_ref) != @aptos_fungible_asset, - error::permission_denied(EAPT_NOT_DISPATCHABLE) - ); - assert!( - !object::can_generate_delete_ref(constructor_ref), - error::invalid_argument(EOBJECT_IS_DELETABLE) - ); + register_dispatch_function_sanity_check(constructor_ref); assert!( !exists( object::address_from_constructor_ref(constructor_ref) ), error::already_exists(EALREADY_REGISTERED) ); - assert!( - exists( - object::address_from_constructor_ref(constructor_ref) - ), - error::not_found(EFUNGIBLE_METADATA_EXISTENCE), - ); let store_obj = &object::generate_signer(constructor_ref); @@ -385,6 +405,70 @@ module aptos_framework::fungible_asset { ); } + /// Define the derived supply dispatch with the provided function. + public(friend) fun register_derive_supply_dispatch_function( + constructor_ref: &ConstructorRef, + dispatch_function: Option + ) { + // Verify that caller type matches callee type so wrongly typed function cannot be registered. + option::for_each_ref(&dispatch_function, |supply_function| { + let function_info = function_info::new_function_info_from_address( + @aptos_framework, + string::utf8(b"dispatchable_fungible_asset"), + string::utf8(b"dispatchable_derived_supply"), + ); + // Verify that caller type matches callee type so wrongly typed function cannot be registered. + assert!( + function_info::check_dispatch_type_compatibility( + &function_info, + supply_function + ), + error::invalid_argument( + EDERIVED_SUPPLY_FUNCTION_SIGNATURE_MISMATCH + ) + ); + }); + register_dispatch_function_sanity_check(constructor_ref); + assert!( + !exists( + object::address_from_constructor_ref(constructor_ref) + ), + error::already_exists(EALREADY_REGISTERED) + ); + + + let store_obj = &object::generate_signer(constructor_ref); + + // Store the overload function hook. + move_to( + store_obj, + DeriveSupply { + dispatch_function + } + ); + } + + /// Check the requirements for registering a dispatchable function. + inline fun register_dispatch_function_sanity_check( + constructor_ref: &ConstructorRef, + ) { + // Cannot register hook for APT. + assert!( + object::address_from_constructor_ref(constructor_ref) != @aptos_fungible_asset, + error::permission_denied(EAPT_NOT_DISPATCHABLE) + ); + assert!( + !object::can_generate_delete_ref(constructor_ref), + error::invalid_argument(EOBJECT_IS_DELETABLE) + ); + assert!( + exists( + object::address_from_constructor_ref(constructor_ref) + ), + error::not_found(EFUNGIBLE_METADATA_EXISTENCE), + ); + } + /// Creates a mint ref that can be used to mint fungible assets from the given fungible object's constructor ref. /// This can only be called at object creation time as constructor_ref is only available then. public fun generate_mint_ref(constructor_ref: &ConstructorRef): MintRef { @@ -407,6 +491,22 @@ module aptos_framework::fungible_asset { TransferRef { metadata } } + /// Creates a balance ref that can be used to access raw balance of fungible assets from the given fungible + /// object's constructor ref. + /// This can only be called at object creation time as constructor_ref is only available then. + public fun generate_raw_balance_ref(constructor_ref: &ConstructorRef): RawBalanceRef { + let metadata = object::object_from_constructor_ref(constructor_ref); + RawBalanceRef { metadata } + } + + /// Creates a supply ref that can be used to access raw supply of fungible assets from the given fungible + /// object's constructor ref. 
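// [Editor's sketch, not part of the patch] What an issuer would pass into the derived-supply
// registration path above: an Option<FunctionInfo> pointing at its own hook. The public
// entry point lives in aptos_framework::dispatchable_fungible_asset and is not shown in this
// hunk; `my_token` and `derived_supply` are hypothetical names. The hook must be
// type-compatible with `dispatchable_derived_supply`, otherwise registration aborts with
// EDERIVED_SUPPLY_FUNCTION_SIGNATURE_MISMATCH.
let supply_hook = option::some(function_info::new_function_info(
    issuer,                          // &signer of the publisher of the hypothetical `my_token`
    string::utf8(b"my_token"),       // hypothetical module name
    string::utf8(b"derived_supply"), // hypothetical hook function name
));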
+ /// This can only be called at object creation time as constructor_ref is only available then. + public fun generate_raw_supply_ref(constructor_ref: &ConstructorRef): RawSupplyRef { + let metadata = object::object_from_constructor_ref(constructor_ref); + RawSupplyRef { metadata } + } + /// Creates a mutate metadata ref that can be used to change the metadata information of fungible assets from the /// given fungible object's constructor ref. /// This can only be called at object creation time as constructor_ref is only available then. @@ -417,11 +517,22 @@ module aptos_framework::fungible_asset { #[view] /// Get the current supply from the `metadata` object. + /// + /// Note: This function will abort on FAs with `derived_supply` hook set up. + /// Use `dispatchable_fungible_asset::supply` instead if you intend to work with those FAs. public fun supply(metadata: Object): Option acquires Supply, ConcurrentSupply { + assert!( + !has_supply_dispatch_function(object::object_address(&metadata)), + error::invalid_argument(EINVALID_DISPATCHABLE_OPERATIONS) + ); + supply_impl(metadata) + } + + fun supply_impl(metadata: Object): Option acquires Supply, ConcurrentSupply { let metadata_address = object::object_address(&metadata); if (exists(metadata_address)) { let supply = borrow_global(metadata_address); - option::some(aggregator_v2::read(&supply.current)) + option::some(supply.current.read()) } else if (exists(metadata_address)) { let supply = borrow_global(metadata_address); option::some(supply.current) @@ -437,7 +548,7 @@ module aptos_framework::fungible_asset { let metadata_address = object::object_address(&metadata); if (exists(metadata_address)) { let supply = borrow_global(metadata_address); - let max_value = aggregator_v2::max_value(&supply.current); + let max_value = supply.current.max_value(); if (max_value == MAX_U128) { option::none() } else { @@ -522,13 +633,25 @@ module aptos_framework::fungible_asset { #[view] /// Get the balance of a given store. - public fun balance(store: Object): u64 acquires FungibleStore, ConcurrentFungibleBalance { + /// + /// Note: This function will abort on FAs with `derived_balance` hook set up. + /// Use `dispatchable_fungible_asset::balance` instead if you intend to work with those FAs. 
+ public fun balance(store: Object): u64 acquires FungibleStore, ConcurrentFungibleBalance, DispatchFunctionStore { + let fa_store = borrow_store_resource(&store); + assert!( + !has_balance_dispatch_function(fa_store.metadata), + error::invalid_argument(EINVALID_DISPATCHABLE_OPERATIONS) + ); + balance_impl(store) + } + + fun balance_impl(store: Object): u64 acquires FungibleStore, ConcurrentFungibleBalance { let store_addr = object::object_address(&store); if (store_exists_inline(store_addr)) { let store_balance = borrow_store_resource(&store).balance; if (store_balance == 0 && concurrent_fungible_balance_exists_inline(store_addr)) { let balance_resource = borrow_global(store_addr); - aggregator_v2::read(&balance_resource.balance) + balance_resource.balance.read() } else { store_balance } @@ -550,7 +673,7 @@ module aptos_framework::fungible_asset { let store_balance = borrow_global(store_addr).balance; if (store_balance == 0 && concurrent_fungible_balance_exists_inline(store_addr)) { let balance_resource = borrow_global(store_addr); - aggregator_v2::is_at_least(&balance_resource.balance, amount) + balance_resource.balance.is_at_least(amount) } else { store_balance >= amount } @@ -616,6 +739,25 @@ module aptos_framework::fungible_asset { } } + fun has_balance_dispatch_function(metadata: Object): bool acquires DispatchFunctionStore { + let metadata_addr = object::object_address(&metadata); + // Short circuit on APT for better perf + if (metadata_addr != @aptos_fungible_asset && exists(metadata_addr)) { + option::is_some(&borrow_global(metadata_addr).derived_balance_function) + } else { + false + } + } + + fun has_supply_dispatch_function(metadata_addr: address): bool { + // Short circuit on APT for better perf + if (metadata_addr != @aptos_fungible_asset) { + exists(metadata_addr) + } else { + false + } + } + public(friend) fun derived_balance_dispatch_function(store: Object): Option acquires FungibleStore, DispatchFunctionStore { let fa_store = borrow_store_resource(&store); let metadata_addr = object::object_address(&fa_store.metadata); @@ -626,6 +768,15 @@ module aptos_framework::fungible_asset { } } + public(friend) fun derived_supply_dispatch_function(metadata: Object): Option acquires DeriveSupply { + let metadata_addr = object::object_address(&metadata); + if (exists(metadata_addr)) { + borrow_global(metadata_addr).dispatch_function + } else { + option::none() + } + } + public fun asset_metadata(fa: &FungibleAsset): Object { fa.metadata } @@ -652,6 +803,9 @@ module aptos_framework::fungible_asset { /// Transfer an `amount` of fungible asset from `from_store`, which should be owned by `sender`, to `receiver`. /// Note: it does not move the underlying object. + /// + /// This function can be in-place replaced by `dispatchable_fungible_asset::transfer`. You should use + /// that function unless you DO NOT want to support fungible assets with dispatchable hooks. public entry fun transfer( sender: &signer, from: Object, @@ -690,15 +844,15 @@ module aptos_framework::fungible_asset { /// Used to delete a store. 
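// [Editor's sketch, not part of the patch] Call pattern implied by the notes above: code that
// must also work for FAs with dispatch hooks should route through
// aptos_framework::dispatchable_fungible_asset, whose `balance`/`transfer` entry points are
// named in these doc comments (their signatures are assumed here to mirror the raw ones).
fun move_between_stores<T: key>(sender: &signer, from: Object<T>, to: Object<T>, amount: u64) {
    // fungible_asset::balance would abort for a hook-enabled FA; the dispatchable variant does not.
    assert!(dispatchable_fungible_asset::balance(from) >= amount, 0);
    dispatchable_fungible_asset::transfer(sender, from, to, amount);
}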
Requires the store to be completely empty prior to removing it public fun remove_store(delete_ref: &DeleteRef) acquires FungibleStore, FungibleAssetEvents, ConcurrentFungibleBalance { - let store = &object::object_from_delete_ref(delete_ref); - let addr = object::object_address(store); - let FungibleStore { metadata: _, balance, frozen: _ } + let store = object::object_from_delete_ref(delete_ref); + let addr = object::object_address(&store); + let FungibleStore { metadata, balance, frozen: _} = move_from(addr); assert!(balance == 0, error::permission_denied(EBALANCE_IS_NOT_ZERO)); if (concurrent_fungible_balance_exists_inline(addr)) { let ConcurrentFungibleBalance { balance } = move_from(addr); - assert!(aggregator_v2::read(&balance) == 0, error::permission_denied(EBALANCE_IS_NOT_ZERO)); + assert!(balance.read() == 0, error::permission_denied(EBALANCE_IS_NOT_ZERO)); }; // Cleanup deprecated event handles if exist. @@ -712,16 +866,47 @@ module aptos_framework::fungible_asset { event::destroy_handle(withdraw_events); event::destroy_handle(frozen_events); }; + event::emit(FungibleStoreDeletion { + store: addr, + owner: object::owner(store), + metadata: object::object_address(&metadata), + }); } /// Withdraw `amount` of the fungible asset from `store` by the owner. + /// + /// Note: This function can be in-place replaced by `dispatchable_fungible_asset::withdraw`. You should use + /// that function unless you DO NOT want to support fungible assets with dispatchable hooks. public fun withdraw( owner: &signer, store: Object, amount: u64, ): FungibleAsset acquires FungibleStore, DispatchFunctionStore, ConcurrentFungibleBalance { withdraw_sanity_check(owner, store, true); - withdraw_internal(object::object_address(&store), amount) + withdraw_permission_check(owner, store, amount); + unchecked_withdraw(object::object_address(&store), amount) + } + + /// Check the permission for withdraw operation. + public(friend) fun withdraw_permission_check( + owner: &signer, + store: Object, + amount: u64, + ) { + assert!(permissioned_signer::check_permission_consume(owner, amount as u256, WithdrawPermission::ByStore { + store_address: object::object_address(&store), + }), error::permission_denied(EWITHDRAW_PERMISSION_DENIED)); + } + + /// Check the permission for withdraw operation. + public(friend) fun withdraw_permission_check_by_address( + owner: &signer, + store_address: address, + amount: u64, + ) { + assert!(permissioned_signer::check_permission_consume(owner, amount as u256, WithdrawPermission::ByStore { + store_address, + }), error::permission_denied(EWITHDRAW_PERMISSION_DENIED)); } /// Check the permission for withdraw operation. @@ -730,7 +915,19 @@ module aptos_framework::fungible_asset { store: Object, abort_on_dispatch: bool, ) acquires FungibleStore, DispatchFunctionStore { - assert!(object::owns(store, signer::address_of(owner)), error::permission_denied(ENOT_STORE_OWNER)); + withdraw_sanity_check_impl( + signer::address_of(owner), + store, + abort_on_dispatch, + ) + } + + inline fun withdraw_sanity_check_impl( + owner_address: address, + store: Object, + abort_on_dispatch: bool, + ) acquires FungibleStore, DispatchFunctionStore { + assert!(object::owns(store, owner_address), error::permission_denied(ENOT_STORE_OWNER)); let fa_store = borrow_store_resource(&store); assert!( !abort_on_dispatch || !has_withdraw_dispatch_function(fa_store.metadata), @@ -753,9 +950,12 @@ module aptos_framework::fungible_asset { } /// Deposit `amount` of the fungible asset to `store`. 
+ /// + /// Note: This function can be in-place replaced by `dispatchable_fungible_asset::deposit`. You should use + /// that function unless you DO NOT want to support fungible assets with dispatchable hooks. public fun deposit(store: Object, fa: FungibleAsset) acquires FungibleStore, DispatchFunctionStore, ConcurrentFungibleBalance { deposit_sanity_check(store, true); - deposit_internal(object::object_address(&store), fa); + unchecked_deposit(object::object_address(&store), fa); } /// Mint the specified `amount` of the fungible asset. @@ -780,7 +980,7 @@ module aptos_framework::fungible_asset { public fun mint_to(ref: &MintRef, store: Object, amount: u64) acquires FungibleStore, Supply, ConcurrentSupply, DispatchFunctionStore, ConcurrentFungibleBalance { deposit_sanity_check(store, false); - deposit_internal(object::object_address(&store), mint(ref, amount)); + unchecked_deposit(object::object_address(&store), mint(ref, amount)); } /// Enable/disable a store's ability to do direct transfers of the fungible asset. @@ -834,16 +1034,17 @@ module aptos_framework::fungible_asset { amount: u64 ) acquires FungibleStore, Supply, ConcurrentSupply, ConcurrentFungibleBalance { // ref metadata match is checked in burn() call - burn(ref, withdraw_internal(object::object_address(&store), amount)); + burn(ref, unchecked_withdraw(object::object_address(&store), amount)); } - public(friend) fun address_burn_from( + /// Burn the `amount` of the fungible asset from the given store for gas charge. + public(friend) fun address_burn_from_for_gas( ref: &BurnRef, store_addr: address, amount: u64 ) acquires FungibleStore, Supply, ConcurrentSupply, ConcurrentFungibleBalance { // ref metadata match is checked in burn() call - burn(ref, withdraw_internal(store_addr, amount)); + burn(ref, unchecked_withdraw_with_no_events(store_addr, amount)); } /// Withdraw `amount` of the fungible asset from the `store` ignoring `frozen`. @@ -856,7 +1057,7 @@ module aptos_framework::fungible_asset { ref.metadata == store_metadata(store), error::invalid_argument(ETRANSFER_REF_AND_STORE_MISMATCH), ); - withdraw_internal(object::object_address(&store), amount) + unchecked_withdraw(object::object_address(&store), amount) } /// Deposit the fungible asset into the `store` ignoring `frozen`. @@ -869,7 +1070,7 @@ module aptos_framework::fungible_asset { ref.metadata == fa.metadata, error::invalid_argument(ETRANSFER_REF_AND_FUNGIBLE_ASSET_MISMATCH) ); - deposit_internal(object::object_address(&store), fa); + unchecked_deposit(object::object_address(&store), fa); } /// Transfer `amount` of the fungible asset with `TransferRef` even it is frozen. @@ -883,6 +1084,30 @@ module aptos_framework::fungible_asset { deposit_with_ref(transfer_ref, to, fa); } + /// Access raw balance of a store using `RawBalanceRef` + public fun balance_with_ref( + ref: &RawBalanceRef, + store: Object, + ): u64 acquires FungibleStore, ConcurrentFungibleBalance { + assert!( + ref.metadata == store_metadata(store), + error::invalid_argument(ERAW_BALANCE_REF_AND_FUNGIBLE_ASSET_MISMATCH) + ); + balance_impl(store) + } + + /// Access raw supply of a FA using `RawSupplyRef` + public fun supply_with_ref( + ref: &RawSupplyRef, + metadata: Object, + ): Option acquires Supply, ConcurrentSupply { + assert!( + object::object_address(&ref.metadata) == object::object_address(&metadata), + error::invalid_argument(ERAW_BALANCE_REF_AND_FUNGIBLE_ASSET_MISMATCH) + ); + supply_impl(metadata) + } + /// Mutate specified fields of the fungible asset's `Metadata`. 
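// [Editor's sketch, not part of the patch] Intended use of the new raw refs: creation code
// captures them from the ConstructorRef, and privileged code later reads balance/supply while
// bypassing the derived_balance / derived_supply hooks. The wrapper struct `HookBypassRefs`
// is hypothetical.
struct HookBypassRefs has key {
    balance_ref: RawBalanceRef,
    supply_ref: RawSupplyRef,
}
fun capture_raw_refs(issuer: &signer, constructor_ref: &ConstructorRef) {
    move_to(issuer, HookBypassRefs {
        balance_ref: generate_raw_balance_ref(constructor_ref),
        supply_ref: generate_raw_supply_ref(constructor_ref),
    });
}
// Later, `balance_with_ref(&refs.balance_ref, store)` and `supply_with_ref(&refs.supply_ref,
// metadata)` return the raw values regardless of any registered hooks.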
public fun mutate_metadata( metadata_ref: &MutateMetadataRef, @@ -896,19 +1121,29 @@ module aptos_framework::fungible_asset { let mutable_metadata = borrow_global_mut(metadata_address); if (option::is_some(&name)){ - mutable_metadata.name = option::extract(&mut name); + let name = option::extract(&mut name); + assert!(string::length(&name) <= MAX_NAME_LENGTH, error::out_of_range(ENAME_TOO_LONG)); + mutable_metadata.name = name; }; if (option::is_some(&symbol)){ - mutable_metadata.symbol = option::extract(&mut symbol); + let symbol = option::extract(&mut symbol); + assert!(string::length(&symbol) <= MAX_SYMBOL_LENGTH, error::out_of_range(ESYMBOL_TOO_LONG)); + mutable_metadata.symbol = symbol; }; if (option::is_some(&decimals)){ - mutable_metadata.decimals = option::extract(&mut decimals); + let decimals = option::extract(&mut decimals); + assert!(decimals <= MAX_DECIMALS, error::out_of_range(EDECIMALS_TOO_LARGE)); + mutable_metadata.decimals = decimals; }; if (option::is_some(&icon_uri)){ - mutable_metadata.icon_uri = option::extract(&mut icon_uri); + let icon_uri = option::extract(&mut icon_uri); + assert!(string::length(&icon_uri) <= MAX_URI_LENGTH, error::out_of_range(EURI_TOO_LONG)); + mutable_metadata.icon_uri = icon_uri; }; if (option::is_some(&project_uri)){ - mutable_metadata.project_uri = option::extract(&mut project_uri); + let project_uri = option::extract(&mut project_uri); + assert!(string::length(&project_uri) <= MAX_URI_LENGTH, error::out_of_range(EURI_TOO_LONG)); + mutable_metadata.project_uri = project_uri; }; } @@ -945,26 +1180,57 @@ module aptos_framework::fungible_asset { assert!(amount == 0, error::invalid_argument(EAMOUNT_IS_NOT_ZERO)); } - public(friend) fun deposit_internal(store_addr: address, fa: FungibleAsset) acquires FungibleStore, ConcurrentFungibleBalance { + inline fun unchecked_deposit_with_no_events_inline( + store_addr: address, + fa: FungibleAsset + ): u64 acquires FungibleStore, ConcurrentFungibleBalance { let FungibleAsset { metadata, amount } = fa; - if (amount == 0) return; - assert!(exists(store_addr), error::not_found(EFUNGIBLE_STORE_EXISTENCE)); let store = borrow_global_mut(store_addr); assert!(metadata == store.metadata, error::invalid_argument(EFUNGIBLE_ASSET_AND_STORE_MISMATCH)); - if (store.balance == 0 && concurrent_fungible_balance_exists_inline(store_addr)) { - let balance_resource = borrow_global_mut(store_addr); - aggregator_v2::add(&mut balance_resource.balance, amount); - } else { - store.balance = store.balance + amount; + if (amount != 0) { + if (store.balance == 0 && concurrent_fungible_balance_exists_inline(store_addr)) { + let balance_resource = borrow_global_mut(store_addr); + balance_resource.balance.add(amount); + } else { + store.balance = store.balance + amount; + }; }; + amount + } - event::emit(Deposit { store: store_addr, amount }); + public(friend) fun unchecked_deposit( + store_addr: address, + fa: FungibleAsset + ) acquires FungibleStore, ConcurrentFungibleBalance { + let amount = unchecked_deposit_with_no_events_inline(store_addr, fa); + if (amount != 0) { + event::emit(Deposit { store: store_addr, amount }); + } } - /// Extract `amount` of the fungible asset from `store`. - public(friend) fun withdraw_internal( + public(friend) fun unchecked_deposit_with_no_events( + store_addr: address, + fa: FungibleAsset + ) acquires FungibleStore, ConcurrentFungibleBalance { + unchecked_deposit_with_no_events_inline(store_addr, fa); + } + + /// Extract `amount` of the fungible asset from `store` emitting event. 
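// [Editor's note, not part of the patch] The expected_failure codes in the new length/size
// tests below follow std::error's canonical encoding, code = (category << 16) | reason, with
// error::out_of_range using category 0x2. From the asserts above and the tests below:
//   error::out_of_range(ENAME_TOO_LONG)      == 0x2000f  => ENAME_TOO_LONG      == 15
//   error::out_of_range(ESYMBOL_TOO_LONG)    == 0x20010  => ESYMBOL_TOO_LONG    == 16
//   error::out_of_range(EDECIMALS_TOO_LARGE) == 0x20011  => EDECIMALS_TOO_LARGE == 17
//   error::out_of_range(EURI_TOO_LONG)       == 0x20013  => EURI_TOO_LONG       == 19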
+ public(friend) fun unchecked_withdraw( + store_addr: address, + amount: u64 + ): FungibleAsset acquires FungibleStore, ConcurrentFungibleBalance { + let fa = unchecked_withdraw_with_no_events(store_addr, amount); + if (amount != 0) { + event::emit(Withdraw { store: store_addr, amount }); + }; + fa + } + + /// Extract `amount` of the fungible asset from `store` w/o emitting event. + inline fun unchecked_withdraw_with_no_events( store_addr: address, amount: u64, ): FungibleAsset acquires FungibleStore, ConcurrentFungibleBalance { @@ -976,15 +1242,13 @@ module aptos_framework::fungible_asset { if (store.balance == 0 && concurrent_fungible_balance_exists_inline(store_addr)) { let balance_resource = borrow_global_mut(store_addr); assert!( - aggregator_v2::try_sub(&mut balance_resource.balance, amount), + balance_resource.balance.try_sub(amount), error::invalid_argument(EINSUFFICIENT_BALANCE) ); } else { assert!(store.balance >= amount, error::invalid_argument(EINSUFFICIENT_BALANCE)); store.balance = store.balance - amount; }; - - event::emit(Withdraw { store: store_addr, amount }); }; FungibleAsset { metadata, amount } } @@ -999,7 +1263,7 @@ module aptos_framework::fungible_asset { if (exists(metadata_address)) { let supply = borrow_global_mut(metadata_address); assert!( - aggregator_v2::try_add(&mut supply.current, (amount as u128)), + supply.current.try_add(amount as u128), error::out_of_range(EMAX_SUPPLY_EXCEEDED) ); } else if (exists(metadata_address)) { @@ -1028,7 +1292,7 @@ module aptos_framework::fungible_asset { let supply = borrow_global_mut(metadata_address); assert!( - aggregator_v2::try_sub(&mut supply.current, (amount as u128)), + supply.current.try_sub(amount as u128), error::out_of_range(ESUPPLY_UNDERFLOW) ); } else if (exists(metadata_address)) { @@ -1115,6 +1379,57 @@ module aptos_framework::fungible_asset { move_to(&object_signer, ConcurrentFungibleBalance { balance }); } + /// Permission management + /// + /// Master signer grant permissioned signer ability to withdraw a given amount of fungible asset. + public fun grant_permission_by_store( + master: &signer, + permissioned: &signer, + store: Object, + amount: u64 + ) { + permissioned_signer::authorize_increase( + master, + permissioned, + amount as u256, + WithdrawPermission::ByStore { + store_address: object::object_address(&store), + } + ) + } + + public(friend) fun grant_permission_by_address( + master: &signer, + permissioned: &signer, + store_address: address, + amount: u64 + ) { + permissioned_signer::authorize_increase( + master, + permissioned, + amount as u256, + WithdrawPermission::ByStore { store_address } + ) + } + + public(friend) fun refill_permission( + permissioned: &signer, + amount: u64, + store_address: address, + ) { + permissioned_signer::increase_limit( + permissioned, + amount as u256, + WithdrawPermission::ByStore { store_address } + ) + } + + #[deprecated] + /// Removing permissions from permissioned signer. 
+ public fun revoke_permission(_permissioned: &signer, _token_type: Object) { + abort 0 + } + #[test_only] use aptos_framework::account; @@ -1170,6 +1485,9 @@ module aptos_framework::fungible_asset { create_store(&object::create_object_from_account(owner), metadata) } + #[test_only] + use aptos_framework::timestamp; + #[test(creator = @0xcafe)] fun test_metadata_basic_flow(creator: &signer) acquires Metadata, Supply, ConcurrentSupply { let (creator_ref, metadata) = create_test_token(creator); @@ -1247,13 +1565,13 @@ module aptos_framework::fungible_asset { mutate_metadata( &mutate_metadata_ref, option::some(string::utf8(b"mutated_name")), - option::some(string::utf8(b"mutated_symbol")), + option::some(string::utf8(b"m_symbol")), option::none(), option::none(), option::none() ); assert!(name(metadata) == string::utf8(b"mutated_name"), 8); - assert!(symbol(metadata) == string::utf8(b"mutated_symbol"), 9); + assert!(symbol(metadata) == string::utf8(b"m_symbol"), 9); assert!(decimals(metadata) == 0, 10); assert!(icon_uri(metadata) == string::utf8(b"http://www.example.com/favicon.ico"), 11); assert!(project_uri(metadata) == string::utf8(b"http://www.example.com"), 12); @@ -1301,7 +1619,7 @@ module aptos_framework::fungible_asset { fun test_transfer_with_ref( creator: &signer, aaron: &signer, - ) acquires FungibleStore, Supply, ConcurrentSupply, ConcurrentFungibleBalance { + ) acquires FungibleStore, Supply, ConcurrentSupply, ConcurrentFungibleBalance, DispatchFunctionStore { let (mint_ref, transfer_ref, _burn_ref, _mutate_metadata_ref, _) = create_fungible_asset(creator); let metadata = mint_ref.metadata; let creator_store = create_test_store(creator, metadata); @@ -1328,13 +1646,13 @@ module aptos_framework::fungible_asset { mutate_metadata( &mutate_metadata_ref, option::some(string::utf8(b"mutated_name")), - option::some(string::utf8(b"mutated_symbol")), + option::some(string::utf8(b"m_symbol")), option::some(10), option::some(string::utf8(b"http://www.mutated-example.com/favicon.ico")), option::some(string::utf8(b"http://www.mutated-example.com")) ); assert!(name(metadata) == string::utf8(b"mutated_name"), 1); - assert!(symbol(metadata) == string::utf8(b"mutated_symbol"), 2); + assert!(symbol(metadata) == string::utf8(b"m_symbol"), 2); assert!(decimals(metadata) == 10, 3); assert!(icon_uri(metadata) == string::utf8(b"http://www.mutated-example.com/favicon.ico"), 4); assert!(project_uri(metadata) == string::utf8(b"http://www.mutated-example.com"), 5); @@ -1350,18 +1668,115 @@ module aptos_framework::fungible_asset { mutate_metadata( &mutate_metadata_ref, option::some(string::utf8(b"mutated_name")), - option::some(string::utf8(b"mutated_symbol")), + option::some(string::utf8(b"m_symbol")), option::none(), option::none(), option::none() ); assert!(name(metadata) == string::utf8(b"mutated_name"), 8); - assert!(symbol(metadata) == string::utf8(b"mutated_symbol"), 9); + assert!(symbol(metadata) == string::utf8(b"m_symbol"), 9); assert!(decimals(metadata) == 0, 10); assert!(icon_uri(metadata) == string::utf8(b"http://www.example.com/favicon.ico"), 11); assert!(project_uri(metadata) == string::utf8(b"http://www.example.com"), 12); } + #[test(creator = @0xcafe)] + #[expected_failure(abort_code = 0x2000f, location = Self)] + fun test_mutate_metadata_name_over_maximum_length( + creator: &signer + ) acquires Metadata { + let (_mint_ref, _transfer_ref, _burn_ref, mutate_metadata_ref, _) = create_fungible_asset(creator); + + mutate_metadata( + &mutate_metadata_ref, + 
option::some(string::utf8(b"mutated_name_will_be_too_long_for_the_maximum_length_check")), + option::none(), + option::none(), + option::none(), + option::none() + ); + } + + #[test(creator = @0xcafe)] + #[expected_failure(abort_code = 0x20010, location = Self)] + fun test_mutate_metadata_symbol_over_maximum_length( + creator: &signer + ) acquires Metadata { + let (_mint_ref, _transfer_ref, _burn_ref, mutate_metadata_ref, _) = create_fungible_asset(creator); + + mutate_metadata( + &mutate_metadata_ref, + option::none(), + option::some(string::utf8(b"mutated_symbol_will_be_too_long_for_the_maximum_length_check")), + option::none(), + option::none(), + option::none() + ); + } + + #[test(creator = @0xcafe)] + #[expected_failure(abort_code = 0x20011, location = Self)] + fun test_mutate_metadata_decimals_over_maximum_amount( + creator: &signer + ) acquires Metadata { + let (_mint_ref, _transfer_ref, _burn_ref, mutate_metadata_ref, _) = create_fungible_asset(creator); + + mutate_metadata( + &mutate_metadata_ref, + option::none(), + option::none(), + option::some(50), + option::none(), + option::none() + ); + } + + #[test_only] + fun create_exceedingly_long_uri(): vector { + use std::vector; + + let too_long_of_uri = b"mutated_uri_will_be_too_long_for_the_maximum_length_check.com/"; + for (i in 0..50) { + vector::append(&mut too_long_of_uri, b"too_long_of_uri"); + }; + + too_long_of_uri + } + + #[test(creator = @0xcafe)] + #[expected_failure(abort_code = 0x20013, location = Self)] + fun test_mutate_metadata_icon_uri_over_maximum_length( + creator: &signer + ) acquires Metadata { + let (_mint_ref, _transfer_ref, _burn_ref, mutate_metadata_ref, _) = create_fungible_asset(creator); + let too_long_of_uri = create_exceedingly_long_uri(); + mutate_metadata( + &mutate_metadata_ref, + option::none(), + option::none(), + option::none(), + option::some(string::utf8(too_long_of_uri)), + option::none() + ); + } + + #[test(creator = @0xcafe)] + #[expected_failure(abort_code = 0x20013, location = Self)] + fun test_mutate_metadata_project_uri_over_maximum_length( + creator: &signer + ) acquires Metadata { + let (_mint_ref, _transfer_ref, _burn_ref, mutate_metadata_ref, _) = create_fungible_asset(creator); + let too_long_of_uri = create_exceedingly_long_uri(); + mutate_metadata( + &mutate_metadata_ref, + option::none(), + option::none(), + option::none(), + option::none(), + option::some(string::utf8(too_long_of_uri)) + ); + } + #[test(creator = @0xcafe)] fun test_merge_and_exact(creator: &signer) acquires Supply, ConcurrentSupply { let (mint_ref, _transfer_ref, burn_ref, _mutate_metadata_ref, _) = create_fungible_asset(creator); @@ -1444,7 +1859,7 @@ module aptos_framework::fungible_asset { assert!(exists(object::object_address(&creator_store)), 9); assert!(borrow_store_resource(&creator_store).balance == 0, 10); assert!(exists(object::object_address(&creator_store)), 11); - assert!(aggregator_v2::read(&borrow_global(object::object_address(&creator_store)).balance) == 10, 12); + assert!(borrow_global(object::object_address(&creator_store)).balance.read() == 10, 12); deposit_with_ref(&transfer_ref, creator_store, fb); } @@ -1474,7 +1889,90 @@ module aptos_framework::fungible_asset { assert!(exists(object::object_address(&creator_store)), 9); assert!(borrow_store_resource(&creator_store).balance == 0, 10); assert!(exists(object::object_address(&creator_store)), 11); - assert!(aggregator_v2::read(&borrow_global(object::object_address(&creator_store)).balance) == 30, 12); + 
assert!(borrow_global(object::object_address(&creator_store)).balance.read() == 30, 12); + } + + #[test(creator = @0xcafe, aaron = @0xface)] + fun test_e2e_withdraw_limit( + creator: &signer, + aaron: &signer, + ) acquires FungibleStore, Supply, ConcurrentSupply, DispatchFunctionStore, ConcurrentFungibleBalance { + let aptos_framework = account::create_signer_for_test(@0x1); + timestamp::set_time_has_started_for_testing(&aptos_framework); + + let (mint_ref, _, _, _, test_token) = create_fungible_asset(creator); + let metadata = mint_ref.metadata; + let creator_store = create_test_store(creator, metadata); + let aaron_store = create_test_store(aaron, metadata); + + assert!(supply(test_token) == option::some(0), 1); + // Mint + let fa = mint(&mint_ref, 100); + assert!(supply(test_token) == option::some(100), 2); + // Deposit + deposit(creator_store, fa); + // Withdraw + let fa = withdraw(creator, creator_store, 80); + assert!(supply(test_token) == option::some(100), 3); + deposit(aaron_store, fa); + + // Create a permissioned signer + let aaron_permission_handle = permissioned_signer::create_permissioned_handle(aaron); + let aaron_permission_signer = permissioned_signer::signer_from_permissioned_handle(&aaron_permission_handle); + + // Grant aaron_permission_signer permission to withdraw 10 FA + grant_permission_by_store(aaron, &aaron_permission_signer, aaron_store, 10); + + let fa = withdraw(&aaron_permission_signer, aaron_store, 5); + deposit(aaron_store, fa); + + let fa = withdraw(&aaron_permission_signer, aaron_store, 5); + deposit(aaron_store, fa); + + // aaron signer don't abide to the same limit + let fa = withdraw(aaron, aaron_store, 5); + deposit(aaron_store, fa); + + permissioned_signer::destroy_permissioned_handle(aaron_permission_handle); + } + + #[test(creator = @0xcafe, aaron = @0xface)] + #[expected_failure(abort_code = 0x50024, location = Self)] + fun test_e2e_withdraw_limit_exceeds( + creator: &signer, + aaron: &signer, + ) acquires FungibleStore, Supply, ConcurrentSupply, DispatchFunctionStore, ConcurrentFungibleBalance { + let aptos_framework = account::create_signer_for_test(@0x1); + timestamp::set_time_has_started_for_testing(&aptos_framework); + + let (mint_ref, _, _, _, test_token) = create_fungible_asset(creator); + let metadata = mint_ref.metadata; + let creator_store = create_test_store(creator, metadata); + let aaron_store = create_test_store(aaron, metadata); + + assert!(supply(test_token) == option::some(0), 1); + // Mint + let fa = mint(&mint_ref, 100); + assert!(supply(test_token) == option::some(100), 2); + // Deposit + deposit(creator_store, fa); + // Withdraw + let fa = withdraw(creator, creator_store, 80); + assert!(supply(test_token) == option::some(100), 3); + deposit(aaron_store, fa); + + // Create a permissioned signer + let aaron_permission_handle = permissioned_signer::create_permissioned_handle(aaron); + let aaron_permission_signer = permissioned_signer::signer_from_permissioned_handle(&aaron_permission_handle); + + // Grant aaron_permission_signer permission to withdraw 10 FA + grant_permission_by_store(aaron, &aaron_permission_signer, aaron_store, 10); + + // Withdrawing more than 10 FA yield an error. 
+ let fa = withdraw(&aaron_permission_signer, aaron_store, 11); + deposit(aaron_store, fa); + + permissioned_signer::destroy_permissioned_handle(aaron_permission_handle); } #[deprecated] diff --git a/aptos-move/framework/aptos-framework/sources/genesis.move b/aptos-move/framework/aptos-framework/sources/genesis.move index 03944b937158f..0dffe26dcf9b3 100644 --- a/aptos-move/framework/aptos-framework/sources/genesis.move +++ b/aptos-move/framework/aptos-framework/sources/genesis.move @@ -10,7 +10,6 @@ module aptos_framework::genesis { use aptos_framework::aptos_account; use aptos_framework::aptos_coin::{Self, AptosCoin}; use aptos_framework::aptos_governance; - use aptos_framework::native_bridge; use aptos_framework::block; use aptos_framework::chain_id; use aptos_framework::chain_status; @@ -19,6 +18,7 @@ module aptos_framework::genesis { use aptos_framework::execution_config; use aptos_framework::create_signer::create_signer; use aptos_framework::gas_schedule; + use aptos_framework::nonce_validation; use aptos_framework::reconfiguration; use aptos_framework::stake; use aptos_framework::staking_contract; @@ -30,7 +30,6 @@ module aptos_framework::genesis { use aptos_framework::transaction_validation; use aptos_framework::version; use aptos_framework::vesting; - use aptos_framework::governed_gas_pool; const EDUPLICATE_ACCOUNT: u64 = 1; const EACCOUNT_DOES_NOT_EXIST: u64 = 2; @@ -95,7 +94,6 @@ module aptos_framework::genesis { b"multi_agent_script_prologue", b"epilogue", ); - // Give the decentralized on-chain governance control over the core framework account. aptos_governance::store_signer_cap(&aptos_framework_account, @aptos_framework, aptos_framework_signer_cap); @@ -111,6 +109,7 @@ module aptos_framework::genesis { execution_config::set(&aptos_framework_account, execution_config); version::initialize(&aptos_framework_account, initial_version); stake::initialize(&aptos_framework_account); + timestamp::set_time_has_started(&aptos_framework_account); staking_config::initialize( &aptos_framework_account, minimum_stake, @@ -126,14 +125,12 @@ module aptos_framework::genesis { // Ensure we can create aggregators for supply, but not enable it for common use just yet. aggregator_factory::initialize_aggregator_factory(&aptos_framework_account); - coin::initialize_supply_config(&aptos_framework_account); chain_id::initialize(&aptos_framework_account, chain_id); reconfiguration::initialize(&aptos_framework_account); block::initialize(&aptos_framework_account, epoch_interval_microsecs); state_storage::initialize(&aptos_framework_account); - timestamp::set_time_has_started(&aptos_framework_account); - native_bridge::initialize(&aptos_framework_account); + nonce_validation::initialize(&aptos_framework_account); } /// Genesis step 2: Initialize Aptos coin. @@ -151,13 +148,6 @@ module aptos_framework::genesis { transaction_fee::store_aptos_coin_mint_cap(aptos_framework, mint_cap); } - fun initialize_governed_gas_pool( - aptos_framework: &signer, - delegation_pool_creation_seed: vector, - ) { - governed_gas_pool::initialize(aptos_framework, delegation_pool_creation_seed); - } - /// Only called for testnets and e2e tests. fun initialize_core_resources_and_aptos_coin( aptos_framework: &signer, @@ -174,6 +164,7 @@ module aptos_framework::genesis { transaction_fee::store_aptos_coin_burn_cap(aptos_framework, burn_cap); // Give transaction_fee module MintCapability so it can mint refunds. 
transaction_fee::store_aptos_coin_mint_cap(aptos_framework, mint_cap); + let core_resources = account::create_account(@core_resources); account::rotate_authentication_key_internal(&core_resources, core_resources_auth_key); aptos_account::register_apt(&core_resources); // registers APT store @@ -201,14 +192,17 @@ module aptos_framework::genesis { /// This creates an funds an account if it doesn't exist. /// If it exists, it just returns the signer. fun create_account(aptos_framework: &signer, account_address: address, balance: u64): signer { - if (account::exists_at(account_address)) { + let account = if (account::exists_at(account_address)) { create_signer(account_address) } else { - let account = account::create_account(account_address); + account::create_account(account_address) + }; + + if (coin::balance(account_address) == 0) { coin::register(&account); aptos_coin::mint(aptos_framework, account_address, balance); - account - } + }; + account } fun create_employee_validators( @@ -279,6 +273,8 @@ module aptos_framework::genesis { }; let validator = &employee_group.validator.validator_config; + // These checks ensure that validator accounts have 0x1::Account resource. + // So, validator accounts can't be stateless. assert!( account::exists_at(validator.owner_address), error::not_found(EACCOUNT_DOES_NOT_EXIST), diff --git a/aptos-move/framework/aptos-framework/sources/genesis.spec.move b/aptos-move/framework/aptos-framework/sources/genesis.spec.move index 439d72c81b43c..5e573f999d4fe 100644 --- a/aptos-move/framework/aptos-framework/sources/genesis.spec.move +++ b/aptos-move/framework/aptos-framework/sources/genesis.spec.move @@ -108,6 +108,10 @@ spec aptos_framework::genesis { ensures exists(@aptos_framework); } + spec initialize_validator { + pragma verify_duration_estimate = 120; + } + spec create_initialize_validators_with_commission { pragma verify_duration_estimate = 120; @@ -127,6 +131,7 @@ spec aptos_framework::genesis { } spec create_initialize_validator { + pragma verify_duration_estimate = 120; include stake::ResourceRequirement; } @@ -157,10 +162,8 @@ spec aptos_framework::genesis { requires chain_status::is_operating(); requires len(execution_config) > 0; requires exists(@aptos_framework); - requires exists(@aptos_framework); requires exists>(@aptos_framework); include CompareTimeRequires; - include transaction_fee::RequiresCollectedFeesPerValueLeqBlockAptosSupply; } spec schema CompareTimeRequires { diff --git a/aptos-move/framework/aptos-framework/sources/governed_gas_pool.move b/aptos-move/framework/aptos-framework/sources/governed_gas_pool.move deleted file mode 100644 index 830b00499ec92..0000000000000 --- a/aptos-move/framework/aptos-framework/sources/governed_gas_pool.move +++ /dev/null @@ -1,364 +0,0 @@ -module aptos_framework::governed_gas_pool { - - friend aptos_framework::transaction_validation; - - use std::vector; - use aptos_framework::account::{Self, SignerCapability, create_signer_with_capability}; - use aptos_framework::system_addresses::{Self}; - // use aptos_framework::primary_fungible_store::{Self}; - use aptos_framework::fungible_asset::{Self}; - use aptos_framework::object::{Self}; - use aptos_framework::aptos_coin::AptosCoin; - use aptos_framework::coin::{Self, Coin}; - use std::features; - use aptos_framework::signer; - use aptos_framework::aptos_account::Self; - #[test_only] - use aptos_framework::coin::{BurnCapability, MintCapability}; - #[test_only] - use aptos_framework::fungible_asset::BurnRef; - #[test_only] - use 
aptos_framework::aptos_coin::Self; - - const MODULE_SALT: vector = b"aptos_framework::governed_gas_pool"; - - /// The Governed Gas Pool - /// Internally, this is a simply wrapper around a resource account. - struct GovernedGasPool has key { - /// The signer capability of the resource account. - signer_capability: SignerCapability, - } - - /// Address of APT Primary Fungible Store - inline fun primary_fungible_store_address(account: address): address { - object::create_user_derived_object_address(account, @aptos_fungible_asset) - } - - /// Create the seed to derive the resource account address. - fun create_resource_account_seed( - delegation_pool_creation_seed: vector, - ): vector { - let seed = vector::empty(); - // include module salt (before any subseeds) to avoid conflicts with other modules creating resource accounts - vector::append(&mut seed, MODULE_SALT); - // include an additional salt in case the same resource account has already been created - vector::append(&mut seed, delegation_pool_creation_seed); - seed - } - - /// Initializes the governed gas pool around a resource account creation seed. - /// @param aptos_framework The signer of the aptos_framework module. - /// @param delegation_pool_creation_seed The seed to be used to create the resource account hosting the delegation pool. - public fun initialize( - aptos_framework: &signer, - delegation_pool_creation_seed: vector, - ) { - system_addresses::assert_aptos_framework(aptos_framework); - - // return if the governed gas pool has already been initialized - if (exists(signer::address_of(aptos_framework))) { - return - }; - - // generate a seed to be used to create the resource account hosting the delegation pool - let seed = create_resource_account_seed(delegation_pool_creation_seed); - - let (governed_gas_pool_signer, governed_gas_pool_signer_cap) = account::create_resource_account(aptos_framework, seed); - - // register apt - aptos_account::register_apt(&governed_gas_pool_signer); - - move_to(aptos_framework, GovernedGasPool{ - signer_capability: governed_gas_pool_signer_cap, - }); - } - - /// Initialize the governed gas pool as a module - /// @param aptos_framework The signer of the aptos_framework module. - fun init_module(aptos_framework: &signer) { - // Initialize the governed gas pool - let seed : vector = b"aptos_framework::governed_gas_pool"; - initialize(aptos_framework, seed); - } - - /// Borrows the signer of the governed gas pool. - /// @return The signer of the governed gas pool. - fun governed_gas_signer(): signer acquires GovernedGasPool { - let signer_cap = &borrow_global(@aptos_framework).signer_capability; - create_signer_with_capability(signer_cap) - } - - #[view] - /// Gets the address of the governed gas pool. - /// @return The address of the governed gas pool. - public fun governed_gas_pool_address(): address acquires GovernedGasPool { - signer::address_of(&governed_gas_signer()) - } - - /// Funds the destination account with a given amount of coin. - /// @param account The account to be funded. - /// @param amount The amount of coin to be funded. - public fun fund(aptos_framework: &signer, account: address, amount: u64) acquires GovernedGasPool { - // Check that the Aptos framework is the caller - // This is what ensures that funding can only be done by the Aptos framework, - // i.e., via a governance proposal. 
- system_addresses::assert_aptos_framework(aptos_framework); - let governed_gas_signer = &governed_gas_signer(); - coin::deposit(account, coin::withdraw(governed_gas_signer, amount)); - } - - /// Deposits some coin into the governed gas pool. - /// @param coin The coin to be deposited. - fun deposit(coin: Coin) acquires GovernedGasPool { - let governed_gas_pool_address = governed_gas_pool_address(); - coin::deposit(governed_gas_pool_address, coin); - } - - /// Deposits some coin from an account to the governed gas pool. - /// @param account The account from which the coin is to be deposited. - /// @param amount The amount of coin to be deposited. - fun deposit_from(account: address, amount: u64) acquires GovernedGasPool { - deposit(coin::withdraw_from(account, amount)); - } - - /// Deposits some FA from the fungible store. - /// @param aptos_framework The signer of the aptos_framework module. - /// @param account The account from which the FA is to be deposited. - /// @param amount The amount of FA to be deposited. - fun deposit_from_fungible_store(account: address, amount: u64) acquires GovernedGasPool { - if (amount > 0){ - // compute the governed gas pool store address - let governed_gas_pool_address = governed_gas_pool_address(); - let governed_gas_pool_store_address = primary_fungible_store_address(governed_gas_pool_address); - - // compute the account store address - let account_store_address = primary_fungible_store_address(account); - fungible_asset::deposit_internal( - governed_gas_pool_store_address, - fungible_asset::withdraw_internal( - account_store_address, - amount - ) - ); - } - } - - /// Deposits gas fees into the governed gas pool. - /// @param gas_payer The address of the account that paid the gas fees. - /// @param gas_fee The amount of gas fees to be deposited. - public fun deposit_gas_fee(_gas_payer: address, _gas_fee: u64) acquires GovernedGasPool { - // get the sender to preserve the signature but do nothing - governed_gas_pool_address(); - } - - /// Deposits gas fees into the governed gas pool. - /// @param gas_payer The address of the account that paid the gas fees. - /// @param gas_fee The amount of gas fees to be deposited. - public(friend) fun deposit_gas_fee_v2(gas_payer: address, gas_fee: u64) acquires GovernedGasPool { - if (features::operations_default_to_fa_apt_store_enabled()) { - deposit_from_fungible_store(gas_payer, gas_fee); - } else { - deposit_from(gas_payer, gas_fee); - }; - } - - #[view] - /// Gets the balance of a specified coin type in the governed gas pool. - /// @return The balance of the coin in the pool. - public fun get_balance(): u64 acquires GovernedGasPool { - let pool_address = governed_gas_pool_address(); - coin::balance(pool_address) - } - - #[test_only] - /// The AptosCoin mint capability - struct AptosCoinMintCapability has key { - mint_cap: MintCapability, - } - - #[test_only] - /// The AptosCoin burn capability - struct AptosCoinBurnCapability has key { - burn_cap: BurnCapability, - } - - #[test_only] - /// The AptosFA burn capabilities - struct AptosFABurnCapabilities has key { - burn_ref: BurnRef, - } - - - #[test_only] - /// Stores the mint capability for AptosCoin. - /// - /// @param aptos_framework The signer representing the Aptos framework. - /// @param mint_cap The mint capability for AptosCoin. 
- public fun store_aptos_coin_mint_cap(aptos_framework: &signer, mint_cap: MintCapability) { - system_addresses::assert_aptos_framework(aptos_framework); - move_to(aptos_framework, AptosCoinMintCapability { mint_cap }) - } - - #[test_only] - /// Stores the burn capability for AptosCoin, converting to a fungible asset reference if the feature is enabled. - /// - /// @param aptos_framework The signer representing the Aptos framework. - /// @param burn_cap The burn capability for AptosCoin. - public fun store_aptos_coin_burn_cap(aptos_framework: &signer, burn_cap: BurnCapability) { - system_addresses::assert_aptos_framework(aptos_framework); - if (features::operations_default_to_fa_apt_store_enabled()) { - let burn_ref = coin::convert_and_take_paired_burn_ref(burn_cap); - move_to(aptos_framework, AptosFABurnCapabilities { burn_ref }); - } else { - move_to(aptos_framework, AptosCoinBurnCapability { burn_cap }) - } - } - - #[test_only] - /// Initializes the governed gas pool around a fixed creation seed for testing - /// - /// @param aptos_framework The signer of the aptos_framework module. - public fun initialize_for_test( - aptos_framework: &signer, - ) { - - // initialize the AptosCoin module - let (burn_cap, mint_cap) = aptos_coin::initialize_for_test(aptos_framework); - - // Initialize the governed gas pool - let seed : vector = b"test"; - initialize(aptos_framework, seed); - - // add the mint capability to the governed gas pool - store_aptos_coin_mint_cap(aptos_framework, mint_cap); - store_aptos_coin_burn_cap(aptos_framework, burn_cap); - - } - - #[test_only] - /// Mints some coin to an account for testing purposes. - /// - /// @param account The account to which the coin is to be minted. - /// @param amount The amount of coin to be minted. - public fun mint_for_test(account: address, amount: u64) acquires AptosCoinMintCapability { - coin::deposit(account, coin::mint( - amount, - &borrow_global(@aptos_framework).mint_cap - )); - } - - #[test(aptos_framework = @aptos_framework, depositor = @0xdddd)] - /// Deposits some coin into the governed gas pool. - /// - /// @param aptos_framework is the signer of the aptos_framework module. - fun test_governed_gas_pool_deposit(aptos_framework: &signer, depositor: &signer) acquires GovernedGasPool, AptosCoinMintCapability { - - // initialize the modules - initialize_for_test(aptos_framework); - - // create the depositor account and fund it - aptos_account::create_account(signer::address_of(depositor)); - mint_for_test(signer::address_of(depositor), 1000); - - // get the balances for the depositor and the governed gas pool - let depositor_balance = coin::balance(signer::address_of(depositor)); - let governed_gas_pool_balance = coin::balance(governed_gas_pool_address()); - - // deposit some coin into the governed gas pool - deposit_from(signer::address_of(depositor), 100); - - // check the balances after the deposit - assert!(coin::balance(signer::address_of(depositor)) == depositor_balance - 100, 1); - assert!(coin::balance(governed_gas_pool_address()) == governed_gas_pool_balance + 100, 2); - - } - - #[test(aptos_framework = @aptos_framework, depositor = @0xdddd)] - /// Deposits some coin from an account to the governed gas pool as gas fees. - /// - /// @param aptos_framework is the signer of the aptos_framework module. - /// @param depositor is the signer of the account from which the coin is to be deposited. 
- fun test_governed_gas_pool_deposit_gas_fee(aptos_framework: &signer, depositor: &signer) acquires GovernedGasPool, AptosCoinMintCapability { - - // initialize the modules - initialize_for_test(aptos_framework); - - // create the depositor account and fund it - aptos_account::create_account(signer::address_of(depositor)); - mint_for_test(signer::address_of(depositor), 1000); - - // get the balances for the depositor and the governed gas pool - let depositor_balance = coin::balance(signer::address_of(depositor)); - let governed_gas_pool_balance = coin::balance(governed_gas_pool_address()); - - // deposit some coin into the governed gas pool as gas fees - deposit_gas_fee_v2(signer::address_of(depositor), 100); - - // check the balances after the deposit - assert!(coin::balance(signer::address_of(depositor)) == depositor_balance - 100, 1); - assert!(coin::balance(governed_gas_pool_address()) == governed_gas_pool_balance + 100, 2); - - } - - #[test(aptos_framework = @aptos_framework)] - /// Test for the get_balance view method. - fun test_governed_gas_pool_get_balance(aptos_framework: &signer) acquires GovernedGasPool, AptosCoinMintCapability { - - // initialize the modules - initialize_for_test(aptos_framework); - - // fund the governed gas pool - let governed_gas_pool_address = governed_gas_pool_address(); - mint_for_test(governed_gas_pool_address, 1000); - - // assert the balance is correct - assert!(get_balance() == 1000, 1); - } - - #[test(aptos_framework = @aptos_framework, depositor = @0xdddd, beneficiary = @0xbbbb)] - /// Funds the destination account with a given amount of coin. - /// - /// @param aptos_framework is the signer of the aptos_framework module. - /// @param depositor is the signer of the account from which the coin is to be funded. - /// @param beneficiary is the address of the account to be funded. 
- fun test_governed_gas_pool_fund(aptos_framework: &signer, depositor: &signer, beneficiary: &signer) acquires GovernedGasPool, AptosCoinMintCapability { - - // initialize the modules - initialize_for_test(aptos_framework); - - // create the depositor account and fund it - aptos_account::create_account(signer::address_of(depositor)); - mint_for_test(signer::address_of(depositor), 1000); - - // get the balances for the depositor and the governed gas pool - let depositor_balance = coin::balance(signer::address_of(depositor)); - let governed_gas_pool_balance = coin::balance(governed_gas_pool_address()); - - // collect gas fees from the depositor - deposit_gas_fee_v2(signer::address_of(depositor), 100); - - // check the balances after the deposit - assert!(coin::balance(signer::address_of(depositor)) == depositor_balance - 100, 1); - assert!(coin::balance(governed_gas_pool_address()) == governed_gas_pool_balance + 100, 2); - - // ensure the beneficiary account has registered with the AptosCoin module - aptos_account::create_account(signer::address_of(beneficiary)); - aptos_account::register_apt(beneficiary); - - // fund the beneficiary account - fund(aptos_framework, signer::address_of(beneficiary), 100); - - // check the balances after the funding - assert!(coin::balance(governed_gas_pool_address()) == governed_gas_pool_balance, 3); - assert!(coin::balance(signer::address_of(beneficiary)) == 100, 4); - - } - - #[test(aptos_framework = @aptos_framework)] - fun test_initialize_is_idempotent(aptos_framework: &signer) { - // initialize the governed gas pool - initialize_for_test(aptos_framework); - // initialize the governed gas pool again, no abort - initialize(aptos_framework, vector::empty()); - } -} diff --git a/aptos-move/framework/aptos-framework/sources/governed_gas_pool.spec.move b/aptos-move/framework/aptos-framework/sources/governed_gas_pool.spec.move deleted file mode 100644 index 3b6ee84b31c93..0000000000000 --- a/aptos-move/framework/aptos-framework/sources/governed_gas_pool.spec.move +++ /dev/null @@ -1,79 +0,0 @@ -spec aptos_framework::governed_gas_pool { - use aptos_framework::coin::EINSUFFICIENT_BALANCE; - use aptos_framework::error; - - /// - /// No.: 1 - /// Requirement: The GovernedGasPool resource must exist at the aptos_framework address after initialization. - /// Criticality: Critical - /// Implementation: The initialize function ensures the resource is created at the aptos_framework address. - /// Enforcement: Formally verified via [high-level-req-1](initialize). - /// - /// No.: 2 - /// Requirement: Only the aptos_framework address is allowed to initialize the GovernedGasPool. - /// Criticality: Critical - /// Implementation: The initialize function verifies the signer is the aptos_framework address. - /// Enforcement: Formally verified via [high-level-req-2](initialize). - /// - /// No.: 3 - /// Requirement: Deposits into the GovernedGasPool must be reflected in the pool's balance. - /// Criticality: High - /// Implementation: The deposit and deposit_from functions update the pool's balance. - /// Enforcement: Formally verified via [high-level-req-3](deposit), [high-level-req-3.1](deposit_from). - /// - /// No.: 4 - /// Requirement: Only the aptos_framework address can fund accounts from the GovernedGasPool. - /// Criticality: High - /// Implementation: The fund function verifies the signer is the aptos_framework address. - /// Enforcement: Formally verified via [high-level-req-4](fund). 
- /// - - spec module { - /// [high-level-req-1] - /// The GovernedGasPool resource must exist at aptos_framework after initialization. - invariant exists(@aptos_framework); - } - - spec initialize(aptos_framework: &signer, delegation_pool_creation_seed: vector) { - requires system_addresses::is_aptos_framework_address(signer::address_of(aptos_framework)); - /// [high-level-req-1] - ensures exists(@aptos_framework); - } - - spec fund(aptos_framework: &signer, account: address, amount: u64) { - pragma aborts_if_is_partial = true; - - /// [high-level-req-4] - // Abort if the caller is not the Aptos framework - aborts_if !system_addresses::is_aptos_framework_address(signer::address_of(aptos_framework)); - - /// Abort if the governed gas pool has insufficient funds - aborts_with coin::EINSUFFICIENT_BALANCE, error::invalid_argument(EINSUFFICIENT_BALANCE), 0x1, 0x5, 0x7; - } - - spec deposit(coin: Coin) { - pragma aborts_if_is_partial = true; - - /* - /// [high-level-req-3] - /// Ensure the deposit increases the value in the CoinStore - - //@TODO: Calling governed_gas_pool_adddress() doesn't work as the boogie gen cant check the signer - // created for the resource account created at runtime - - /// Ensure the governed gas pool resource account exists - //aborts_if !exists>(governed_gas_pool_address()); - - //ensures global>(aptos_framework_address).coin.value == - //old(global>(aptos_framework_address).coin.value) + coin.value; - */ - } - - spec deposit_gas_fee(_gas_payer: address, _gas_fee: u64) { - /* - /// [high-level-req-5] - // ensures governed_gas_pool_balance == old(governed_gas_pool_balance) + gas_fee; - // ensures gas_payer_balance == old(gas_payer_balance) - gas_fee; - */ - } -} diff --git a/aptos-move/framework/aptos-framework/sources/jwks.move b/aptos-move/framework/aptos-framework/sources/jwks.move index c0bcdc7468250..ecd2ade70c3ba 100644 --- a/aptos-move/framework/aptos-framework/sources/jwks.move +++ b/aptos-move/framework/aptos-framework/sources/jwks.move @@ -5,9 +5,12 @@ /// write some of the resources in this file. As a result, the structs in this file are declared so as to /// have a simple layout which is easily accessible in Rust. module aptos_framework::jwks { + use std::bcs; use std::error; + use std::features; use std::option; use std::option::Option; + use std::signer; use std::string; use std::string::{String, utf8}; use std::vector; @@ -25,12 +28,19 @@ module aptos_framework::jwks { friend aptos_framework::genesis; friend aptos_framework::reconfiguration_with_dkg; + /// We limit the size of a `PatchedJWKs` resource installed by a dapp owner for federated keyless accounts. + /// Note: If too large, validators waste work reading it for invalid TXN signatures. + const MAX_FEDERATED_JWKS_SIZE_BYTES: u64 = 2 * 1024; // 2 KiB + const EUNEXPECTED_EPOCH: u64 = 1; const EUNEXPECTED_VERSION: u64 = 2; const EUNKNOWN_PATCH_VARIANT: u64 = 3; const EUNKNOWN_JWK_VARIANT: u64 = 4; const EISSUER_NOT_FOUND: u64 = 5; const EJWK_ID_NOT_FOUND: u64 = 6; + const EINSTALL_FEDERATED_JWKS_AT_APTOS_FRAMEWORK: u64 = 7; + const EFEDERATED_JWKS_TOO_LARGE: u64 = 8; + const EINVALID_FEDERATED_JWK_SET: u64 = 9; const ENATIVE_MISSING_RESOURCE_VALIDATOR_SET: u64 = 0x0101; const ENATIVE_MISSING_RESOURCE_OBSERVED_JWKS: u64 = 0x0102; @@ -38,6 +48,8 @@ module aptos_framework::jwks { const ENATIVE_MULTISIG_VERIFICATION_FAILED: u64 = 0x0104; const ENATIVE_NOT_ENOUGH_VOTING_POWER: u64 = 0x0105; + const DELETE_COMMAND_INDICATOR: vector = b"THIS_IS_A_DELETE_COMMAND"; + /// An OIDC provider. 
struct OIDCProvider has copy, drop, store { /// The utf-8 encoded issuer string. E.g., b"https://www.facebook.com". @@ -155,11 +167,115 @@ module aptos_framework::jwks { jwks: AllProvidersJWKs, } + /// JWKs for federated keyless accounts are stored in this resource. + struct FederatedJWKs has drop, key { + jwks: AllProvidersJWKs, + } + // // Structs end. // Functions begin. // + /// Called by a federated keyless dapp owner to install the JWKs for the federated OIDC provider (e.g., Auth0, AWS + /// Cognito, etc). For type-safety, we explicitly use a `struct FederatedJWKs { jwks: AllProviderJWKs }` instead of + /// reusing `PatchedJWKs { jwks: AllProviderJWKs }`, which is a JWK-consensus-specific struct. + public fun patch_federated_jwks(jwk_owner: &signer, patches: vector) acquires FederatedJWKs { + // Prevents accidental calls in 0x1::jwks that install federated JWKs at the Aptos framework address. + assert!(!system_addresses::is_aptos_framework_address(signer::address_of(jwk_owner)), + error::invalid_argument(EINSTALL_FEDERATED_JWKS_AT_APTOS_FRAMEWORK) + ); + + let jwk_addr = signer::address_of(jwk_owner); + if (!exists(jwk_addr)) { + move_to(jwk_owner, FederatedJWKs { jwks: AllProvidersJWKs { entries: vector[] } }); + }; + + let fed_jwks = borrow_global_mut(jwk_addr); + vector::for_each_ref(&patches, |obj|{ + let patch: &Patch = obj; + apply_patch(&mut fed_jwks.jwks, *patch); + }); + + // TODO: Can we check the size more efficiently instead of serializing it via BCS? + let num_bytes = vector::length(&bcs::to_bytes(fed_jwks)); + assert!(num_bytes < MAX_FEDERATED_JWKS_SIZE_BYTES, error::invalid_argument(EFEDERATED_JWKS_TOO_LARGE)); + } + + /// This can be called to install or update a set of JWKs for a federated OIDC provider. This function should + /// be invoked to intially install a set of JWKs or to update a set of JWKs when a keypair is rotated. + /// + /// The `iss` parameter is the value of the `iss` claim on the JWTs that are to be verified by the JWK set. + /// `kid_vec`, `alg_vec`, `e_vec`, `n_vec` are String vectors of the JWK attributes `kid`, `alg`, `e` and `n` respectively. + /// See https://datatracker.ietf.org/doc/html/rfc7517#section-4 for more details about the JWK attributes aforementioned. 
+ /// + /// For the example JWK set snapshot below containing 2 keys for Google found at https://www.googleapis.com/oauth2/v3/certs - + /// ```json + /// { + /// "keys": [ + /// { + /// "alg": "RS256", + /// "use": "sig", + /// "kty": "RSA", + /// "n": "wNHgGSG5B5xOEQNFPW2p_6ZxZbfPoAU5VceBUuNwQWLop0ohW0vpoZLU1tAsq_S9s5iwy27rJw4EZAOGBR9oTRq1Y6Li5pDVJfmzyRNtmWCWndR-bPqhs_dkJU7MbGwcvfLsN9FSHESFrS9sfGtUX-lZfLoGux23TKdYV9EE-H-NDASxrVFUk2GWc3rL6UEMWrMnOqV9-tghybDU3fcRdNTDuXUr9qDYmhmNegYjYu4REGjqeSyIG1tuQxYpOBH-tohtcfGY-oRTS09kgsSS9Q5BRM4qqCkGP28WhlSf4ui0-norS0gKMMI1P_ZAGEsLn9p2TlYMpewvIuhjJs1thw", + /// "kid": "d7b939771a7800c413f90051012d975981916d71", + /// "e": "AQAB" + /// }, + /// { + /// "kty": "RSA", + /// "kid": "b2620d5e7f132b52afe8875cdf3776c064249d04", + /// "alg": "RS256", + /// "n": "pi22xDdK2fz5gclIbDIGghLDYiRO56eW2GUcboeVlhbAuhuT5mlEYIevkxdPOg5n6qICePZiQSxkwcYMIZyLkZhSJ2d2M6Szx2gDtnAmee6o_tWdroKu0DjqwG8pZU693oLaIjLku3IK20lTs6-2TeH-pUYMjEqiFMhn-hb7wnvH_FuPTjgz9i0rEdw_Hf3Wk6CMypaUHi31y6twrMWq1jEbdQNl50EwH-RQmQ9bs3Wm9V9t-2-_Jzg3AT0Ny4zEDU7WXgN2DevM8_FVje4IgztNy29XUkeUctHsr-431_Iu23JIy6U4Kxn36X3RlVUKEkOMpkDD3kd81JPW4Ger_w", + /// "e": "AQAB", + /// "use": "sig" + /// } + /// ] + /// } + /// ``` + /// + /// We can call update_federated_jwk_set for Google's `iss` - "https://accounts.google.com" and for each vector + /// argument `kid_vec`, `alg_vec`, `e_vec`, `n_vec`, we set in index 0 the corresponding attribute in the first JWK and we set in index 1 + /// the corresponding attribute in the second JWK as shown below. + /// + /// ```move + /// use std::string::utf8; + /// aptos_framework::jwks::update_federated_jwk_set( + /// jwk_owner, + /// b"https://accounts.google.com", + /// vector[utf8(b"d7b939771a7800c413f90051012d975981916d71"), utf8(b"b2620d5e7f132b52afe8875cdf3776c064249d04")], + /// vector[utf8(b"RS256"), utf8(b"RS256")], + /// vector[utf8(b"AQAB"), utf8(b"AQAB")], + /// vector[ + /// utf8(b"wNHgGSG5B5xOEQNFPW2p_6ZxZbfPoAU5VceBUuNwQWLop0ohW0vpoZLU1tAsq_S9s5iwy27rJw4EZAOGBR9oTRq1Y6Li5pDVJfmzyRNtmWCWndR-bPqhs_dkJU7MbGwcvfLsN9FSHESFrS9sfGtUX-lZfLoGux23TKdYV9EE-H-NDASxrVFUk2GWc3rL6UEMWrMnOqV9-tghybDU3fcRdNTDuXUr9qDYmhmNegYjYu4REGjqeSyIG1tuQxYpOBH-tohtcfGY-oRTS09kgsSS9Q5BRM4qqCkGP28WhlSf4ui0-norS0gKMMI1P_ZAGEsLn9p2TlYMpewvIuhjJs1thw"), + /// utf8(b"pi22xDdK2fz5gclIbDIGghLDYiRO56eW2GUcboeVlhbAuhuT5mlEYIevkxdPOg5n6qICePZiQSxkwcYMIZyLkZhSJ2d2M6Szx2gDtnAmee6o_tWdroKu0DjqwG8pZU693oLaIjLku3IK20lTs6-2TeH-pUYMjEqiFMhn-hb7wnvH_FuPTjgz9i0rEdw_Hf3Wk6CMypaUHi31y6twrMWq1jEbdQNl50EwH-RQmQ9bs3Wm9V9t-2-_Jzg3AT0Ny4zEDU7WXgN2DevM8_FVje4IgztNy29XUkeUctHsr-431_Iu23JIy6U4Kxn36X3RlVUKEkOMpkDD3kd81JPW4Ger_w") + /// ] + /// ) + /// ``` + /// + /// See AIP-96 for more details about federated keyless - https://github.com/aptos-foundation/AIPs/blob/main/aips/aip-96.md + /// + /// NOTE: Currently only RSA keys are supported. 
+ public entry fun update_federated_jwk_set(jwk_owner: &signer, iss: vector, kid_vec: vector, alg_vec: vector, e_vec: vector, n_vec: vector) acquires FederatedJWKs { + assert!(!vector::is_empty(&kid_vec), error::invalid_argument(EINVALID_FEDERATED_JWK_SET)); + let num_jwk = vector::length(&kid_vec); + assert!(vector::length(&alg_vec) == num_jwk , error::invalid_argument(EINVALID_FEDERATED_JWK_SET)); + assert!(vector::length(&e_vec) == num_jwk, error::invalid_argument(EINVALID_FEDERATED_JWK_SET)); + assert!(vector::length(&n_vec) == num_jwk, error::invalid_argument(EINVALID_FEDERATED_JWK_SET)); + + let remove_all_patch = new_patch_remove_all(); + let patches = vector[remove_all_patch]; + while (!vector::is_empty(&kid_vec)) { + let kid = vector::pop_back(&mut kid_vec); + let alg = vector::pop_back(&mut alg_vec); + let e = vector::pop_back(&mut e_vec); + let n = vector::pop_back(&mut n_vec); + let jwk = new_rsa_jwk(kid, alg, e, n); + let patch = new_patch_upsert_jwk(iss, jwk); + vector::push_back(&mut patches, patch) + }; + patch_federated_jwks(jwk_owner, patches); + } + /// Get a JWK by issuer and key ID from the `PatchedJWKs`. /// Abort if such a JWK does not exist. /// More convenient to call from Rust, since it does not wrap the JWK in an `Option`. @@ -202,9 +318,9 @@ module aptos_framework::jwks { system_addresses::assert_aptos_framework(fx); let provider_set = if (config_buffer::does_exist()) { - config_buffer::extract() + config_buffer::extract_v2() } else { - *borrow_global_mut(@aptos_framework) + *borrow_global(@aptos_framework) }; let old_config_url = remove_oidc_provider_internal(&mut provider_set, name); @@ -237,9 +353,9 @@ module aptos_framework::jwks { system_addresses::assert_aptos_framework(fx); let provider_set = if (config_buffer::does_exist()) { - config_buffer::extract() + config_buffer::extract_v2() } else { - *borrow_global_mut(@aptos_framework) + *borrow_global(@aptos_framework) }; let ret = remove_oidc_provider_internal(&mut provider_set, name); config_buffer::upsert(provider_set); @@ -250,7 +366,7 @@ module aptos_framework::jwks { public(friend) fun on_new_epoch(framework: &signer) acquires SupportedOIDCProviders { system_addresses::assert_aptos_framework(framework); if (config_buffer::does_exist()) { - let new_config = config_buffer::extract(); + let new_config = config_buffer::extract_v2(); if (exists(@aptos_framework)) { *borrow_global_mut(@aptos_framework) = new_config; } else { @@ -346,10 +462,42 @@ module aptos_framework::jwks { public fun upsert_into_observed_jwks(fx: &signer, provider_jwks_vec: vector) acquires ObservedJWKs, PatchedJWKs, Patches { system_addresses::assert_aptos_framework(fx); let observed_jwks = borrow_global_mut(@aptos_framework); - vector::for_each(provider_jwks_vec, |obj| { - let provider_jwks: ProviderJWKs = obj; - upsert_provider_jwks(&mut observed_jwks.jwks, provider_jwks); - }); + + if (features::is_jwk_consensus_per_key_mode_enabled()) { + vector::for_each(provider_jwks_vec, |proposed_provider_jwks|{ + let maybe_cur_issuer_jwks = remove_issuer(&mut observed_jwks.jwks, proposed_provider_jwks.issuer); + let cur_issuer_jwks = if (option::is_some(&maybe_cur_issuer_jwks)) { + option::extract(&mut maybe_cur_issuer_jwks) + } else { + ProviderJWKs { + issuer: proposed_provider_jwks.issuer, + version: 0, + jwks: vector[], + } + }; + assert!(cur_issuer_jwks.version + 1 == proposed_provider_jwks.version, error::invalid_argument(EUNEXPECTED_VERSION)); + vector::for_each(proposed_provider_jwks.jwks, |jwk|{ + let variant_type_name = 
*string::bytes(copyable_any::type_name(&jwk.variant)); + let is_delete = if (variant_type_name == b"0x1::jwks::UnsupportedJWK") { + let repr = copyable_any::unpack(jwk.variant); + &repr.payload == &DELETE_COMMAND_INDICATOR + } else { + false + }; + if (is_delete) { + remove_jwk(&mut cur_issuer_jwks, get_jwk_id(&jwk)); + } else { + upsert_jwk(&mut cur_issuer_jwks, jwk); + } + }); + cur_issuer_jwks.version = cur_issuer_jwks.version + 1; + upsert_provider_jwks(&mut observed_jwks.jwks, cur_issuer_jwks); + }); + } else { + vector::for_each(provider_jwks_vec, |provider_jwks| { + upsert_provider_jwks(&mut observed_jwks.jwks, provider_jwks); + }); + }; let epoch = reconfiguration::current_epoch(); emit(ObservedJWKsUpdated { epoch, jwks: observed_jwks.jwks }); @@ -382,7 +530,7 @@ module aptos_framework::jwks { *borrow_global_mut(@aptos_framework) = PatchedJWKs { jwks }; } - /// Get a JWK by issuer and key ID from a `AllProvidersJWKs`, if it exists. + /// Get a JWK by issuer and key ID from an `AllProvidersJWKs`, if it exists. fun try_get_jwk_by_issuer(jwks: &AllProvidersJWKs, issuer: vector, jwk_id: vector): Option { let (issuer_found, index) = vector::find(&jwks.entries, |obj| { let provider_jwks: &ProviderJWKs = obj; @@ -576,6 +724,7 @@ module aptos_framework::jwks { #[test(fx = @aptos_framework)] fun test_observed_jwks_operations(fx: &signer) acquires ObservedJWKs, PatchedJWKs, Patches { initialize_for_test(fx); + features::change_feature_flags_for_testing(fx, vector[], vector[features::get_jwk_consensus_per_key_mode_feature()]); let jwk_0 = new_unsupported_jwk(b"key_id_0", b"key_payload_0"); let jwk_1 = new_unsupported_jwk(b"key_id_1", b"key_payload_1"); let jwk_2 = new_unsupported_jwk(b"key_id_2", b"key_payload_2"); @@ -619,6 +768,91 @@ module aptos_framework::jwks { assert!(expected == borrow_global(@aptos_framework).jwks, 4); } + #[test(fx = @aptos_framework)] + fun test_observed_jwks_operations_per_key_mode(fx: &signer) acquires ObservedJWKs, PatchedJWKs, Patches { + initialize_for_test(fx); + features::change_feature_flags_for_testing(fx, vector[features::get_jwk_consensus_per_key_mode_feature()], vector[]); + + let mandatory_jwk= new_rsa_jwk( + utf8(b"kid999"), + utf8(b"RS256"), + utf8(b"AQAB"), + utf8(b"999999999"), + ); + + set_patches(fx, vector[new_patch_upsert_jwk(b"alice", mandatory_jwk)]); + + // Insert a key. + let alice_jwk_1 = new_rsa_jwk( + utf8(b"kid123"), + utf8(b"RS256"), + utf8(b"AQAB"), + utf8(b"999999999"), + ); + let key_level_update_0 = ProviderJWKs { + issuer: b"alice", + version: 1, + jwks: vector[alice_jwk_1], + }; + upsert_into_observed_jwks(fx, vector[key_level_update_0]); + let expected = AllProvidersJWKs { + entries: vector[ + ProviderJWKs { + issuer: b"alice", + version: 1, + jwks: vector[alice_jwk_1, mandatory_jwk], + }, + ] + }; + assert!(expected == borrow_global(@aptos_framework).jwks, 999); + + // Update a key. + let alice_jwk_1b = new_rsa_jwk( + utf8(b"kid123"), + utf8(b"RS256"), + utf8(b"AQAB"), + utf8(b"88888888"), + ); + let key_level_update_1 = ProviderJWKs { + issuer: b"alice", + version: 2, + jwks: vector[alice_jwk_1b], + }; + upsert_into_observed_jwks(fx, vector[key_level_update_1]); + let expected = AllProvidersJWKs { + entries: vector[ + ProviderJWKs { + issuer: b"alice", + version: 2, + jwks: vector[alice_jwk_1b, mandatory_jwk], + }, + ] + }; + assert!(expected == borrow_global(@aptos_framework).jwks, 999); + + // Delete a key. 
+ let delete_command = new_unsupported_jwk( + b"kid123", + DELETE_COMMAND_INDICATOR, + ); + let key_level_update_1 = ProviderJWKs { + issuer: b"alice", + version: 3, + jwks: vector[delete_command], + }; + upsert_into_observed_jwks(fx, vector[key_level_update_1]); + let expected = AllProvidersJWKs { + entries: vector[ + ProviderJWKs { + issuer: b"alice", + version: 3, + jwks: vector[mandatory_jwk], + }, + ] + }; + assert!(expected == borrow_global(@aptos_framework).jwks, 999); + } + #[test] fun test_apply_patch() { let jwks = AllProvidersJWKs { @@ -729,13 +963,20 @@ module aptos_framework::jwks { #[test(aptos_framework = @aptos_framework)] fun test_patched_jwks(aptos_framework: signer) acquires ObservedJWKs, PatchedJWKs, Patches { initialize_for_test(&aptos_framework); + + features::change_feature_flags_for_testing( + &aptos_framework, + vector[], + vector[features::get_jwk_consensus_per_key_mode_feature()] + ); + let jwk_0 = new_unsupported_jwk(b"key_id_0", b"key_payload_0"); let jwk_1 = new_unsupported_jwk(b"key_id_1", b"key_payload_1"); let jwk_2 = new_unsupported_jwk(b"key_id_2", b"key_payload_2"); let jwk_3 = new_unsupported_jwk(b"key_id_3", b"key_payload_3"); let jwk_3b = new_unsupported_jwk(b"key_id_3", b"key_payload_3b"); - // Fake observation from validators. + // Insert fake observation in per-issuer mode. upsert_into_observed_jwks(&aptos_framework, vector [ ProviderJWKs { issuer: b"alice", diff --git a/aptos-move/framework/aptos-framework/sources/keyless_account.move b/aptos-move/framework/aptos-framework/sources/keyless_account.move index 269c209b228b9..63f4b5874e257 100644 --- a/aptos-move/framework/aptos-framework/sources/keyless_account.move +++ b/aptos-move/framework/aptos-framework/sources/keyless_account.move @@ -224,7 +224,7 @@ module aptos_framework::keyless_account { }; let config = if (config_buffer::does_exist()) { - config_buffer::extract() + config_buffer::extract_v2() } else { *borrow_global(signer::address_of(fx)) }; @@ -240,7 +240,7 @@ module aptos_framework::keyless_account { system_addresses::assert_aptos_framework(fx); let config = if (config_buffer::does_exist()) { - config_buffer::extract() + config_buffer::extract_v2() } else { *borrow_global(signer::address_of(fx)) }; @@ -259,7 +259,7 @@ module aptos_framework::keyless_account { system_addresses::assert_aptos_framework(fx); let config = if (config_buffer::does_exist()) { - config_buffer::extract() + config_buffer::extract_v2() } else { *borrow_global(signer::address_of(fx)) }; @@ -269,7 +269,7 @@ module aptos_framework::keyless_account { set_configuration_for_next_epoch(fx, config); } - /// Convenience method to queue up an append to to the set of override `aud`'s. The change will only be effective + /// Convenience method to queue up an append to the set of override `aud`'s. The change will only be effective /// after reconfiguration. Only callable via governance proposal. /// /// WARNING: If a malicious override `aud` is set, this *could* lead to stolen funds. 
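Note: across `jwks.move` and `keyless_account.move`, the same two mechanical changes recur in the hunks above and below: pending on-chain configuration is now drained with `config_buffer::extract_v2` instead of `config_buffer::extract`, and read-only call sites use `borrow_global` rather than `borrow_global_mut`. The sketch below shows the staging pattern these call sites follow; it is illustrative only, uses a hypothetical `MyConfig` resource, assumes the calling module has the framework's usual friend access to `config_buffer`, and writes out the type parameters that appear stripped elsewhere in this diff.

```move
module aptos_framework::my_config_example {
    use aptos_framework::config_buffer;
    use aptos_framework::system_addresses;

    /// Hypothetical on-chain config, used only to illustrate the pattern.
    struct MyConfig has copy, drop, store, key { value: u64 }

    /// Queue a new value; it only becomes active at the next reconfiguration.
    public fun set_for_next_epoch(fx: &signer, value: u64) {
        system_addresses::assert_aptos_framework(fx);
        config_buffer::upsert(MyConfig { value });
    }

    /// Read path: copying the current value needs no mutable borrow.
    public fun current(): MyConfig acquires MyConfig {
        *borrow_global<MyConfig>(@aptos_framework)
    }

    /// Epoch boundary: drain the buffer if a pending value exists,
    /// otherwise leave the stored config untouched.
    public fun on_new_epoch(fx: &signer) acquires MyConfig {
        system_addresses::assert_aptos_framework(fx);
        if (config_buffer::does_exist<MyConfig>()) {
            let new_config = config_buffer::extract_v2<MyConfig>();
            if (exists<MyConfig>(@aptos_framework)) {
                *borrow_global_mut<MyConfig>(@aptos_framework) = new_config;
            } else {
                move_to(fx, new_config);
            }
        };
    }
}
```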
@@ -277,7 +277,7 @@ module aptos_framework::keyless_account { system_addresses::assert_aptos_framework(fx); let config = if (config_buffer::does_exist()) { - config_buffer::extract() + config_buffer::extract_v2() } else { *borrow_global(signer::address_of(fx)) }; @@ -292,7 +292,7 @@ module aptos_framework::keyless_account { system_addresses::assert_aptos_framework(fx); if (config_buffer::does_exist()) { - let vk = config_buffer::extract(); + let vk = config_buffer::extract_v2(); if (exists(@aptos_framework)) { *borrow_global_mut(@aptos_framework) = vk; } else { @@ -301,7 +301,7 @@ module aptos_framework::keyless_account { }; if (config_buffer::does_exist()) { - let config = config_buffer::extract(); + let config = config_buffer::extract_v2(); if (exists(@aptos_framework)) { *borrow_global_mut(@aptos_framework) = config; } else { diff --git a/aptos-move/framework/aptos-framework/sources/managed_coin.move b/aptos-move/framework/aptos-framework/sources/managed_coin.move index d2932ddb4edd7..99b34f0817b04 100644 --- a/aptos-move/framework/aptos-framework/sources/managed_coin.move +++ b/aptos-move/framework/aptos-framework/sources/managed_coin.move @@ -6,7 +6,9 @@ module aptos_framework::managed_coin { use std::error; use std::signer; - use aptos_framework::coin::{Self, BurnCapability, FreezeCapability, MintCapability}; + use aptos_framework::coin::{Self, BurnCapability, FreezeCapability, MintCapability, destroy_burn_cap, + destroy_freeze_cap, destroy_mint_cap + }; // // Errors @@ -97,6 +99,32 @@ module aptos_framework::managed_coin { coin::register(account); } + /// Destroys capabilities from the account, so that the user no longer has access to mint or burn. + public entry fun destroy_caps(account: &signer) acquires Capabilities { + let (burn_cap, freeze_cap, mint_cap) = remove_caps(account); + destroy_burn_cap(burn_cap); + destroy_freeze_cap(freeze_cap); + destroy_mint_cap(mint_cap); + } + + /// Removes capabilities from the account to be stored or destroyed elsewhere + public fun remove_caps( + account: &signer + ): (BurnCapability, FreezeCapability, MintCapability) acquires Capabilities { + let account_addr = signer::address_of(account); + assert!( + exists>(account_addr), + error::not_found(ENO_CAPABILITIES), + ); + + let Capabilities { + burn_cap, + freeze_cap, + mint_cap, + } = move_from>(account_addr); + (burn_cap, freeze_cap, mint_cap) + } + // // Tests // @@ -110,8 +138,9 @@ module aptos_framework::managed_coin { #[test_only] struct FakeMoney {} - #[test(source = @0xa11ce, destination = @0xb0b, mod_account = @0x1)] + #[test(framework = @aptos_framework, source = @0xa11ce, destination = @0xb0b, mod_account = @0x1)] public entry fun test_end_to_end( + framework: signer, source: signer, destination: signer, mod_account: signer @@ -122,6 +151,7 @@ module aptos_framework::managed_coin { aptos_framework::account::create_account_for_test(destination_addr); aptos_framework::account::create_account_for_test(signer::address_of(&mod_account)); aggregator_factory::initialize_aggregator_factory_for_test(&mod_account); + aptos_framework::coin::create_coin_conversion_map(&framework); initialize( &mod_account, @@ -156,11 +186,46 @@ module aptos_framework::managed_coin { let new_supply = coin::supply(); assert!(option::extract(&mut new_supply) == 20, 2); + + // Destroy mint capabilities + destroy_caps(&mod_account); + assert!(!exists>(signer::address_of(&mod_account)), 3); } #[test(source = @0xa11ce, destination = @0xb0b, mod_account = @0x1)] + public entry fun test_end_to_end_caps_removal( + source: 
signer, + destination: signer, + mod_account: signer + ) acquires Capabilities { + let source_addr = signer::address_of(&source); + let destination_addr = signer::address_of(&destination); + aptos_framework::account::create_account_for_test(source_addr); + aptos_framework::account::create_account_for_test(destination_addr); + aptos_framework::account::create_account_for_test(signer::address_of(&mod_account)); + aggregator_factory::initialize_aggregator_factory_for_test(&mod_account); + + initialize( + &mod_account, + b"Fake Money", + b"FMD", + 10, + true + ); + assert!(coin::is_coin_initialized(), 0); + + // Remove capabilities + let (burn_cap, freeze_cap, mint_cap) = remove_caps(&mod_account); + assert!(!exists>(signer::address_of(&mod_account)), 3); + coin::destroy_mint_cap(mint_cap); + coin::destroy_freeze_cap(freeze_cap); + coin::destroy_burn_cap(burn_cap); + } + + #[test(framework = @aptos_framework, source = @0xa11ce, destination = @0xb0b, mod_account = @0x1)] #[expected_failure(abort_code = 0x60001, location = Self)] public entry fun fail_mint( + framework: signer, source: signer, destination: signer, mod_account: signer, @@ -171,6 +236,8 @@ module aptos_framework::managed_coin { aptos_framework::account::create_account_for_test(signer::address_of(&destination)); aptos_framework::account::create_account_for_test(signer::address_of(&mod_account)); aggregator_factory::initialize_aggregator_factory_for_test(&mod_account); + aptos_framework::coin::create_coin_conversion_map(&framework); + initialize(&mod_account, b"Fake money", b"FMD", 1, true); coin::register(&mod_account); @@ -180,9 +247,10 @@ module aptos_framework::managed_coin { mint(&destination, source_addr, 100); } - #[test(source = @0xa11ce, destination = @0xb0b, mod_account = @0x1)] + #[test(framework = @aptos_framework, source = @0xa11ce, destination = @0xb0b, mod_account = @0x1)] #[expected_failure(abort_code = 0x60001, location = Self)] public entry fun fail_burn( + framework: signer, source: signer, destination: signer, mod_account: signer, @@ -193,6 +261,8 @@ module aptos_framework::managed_coin { aptos_framework::account::create_account_for_test(signer::address_of(&destination)); aptos_framework::account::create_account_for_test(signer::address_of(&mod_account)); aggregator_factory::initialize_aggregator_factory_for_test(&mod_account); + aptos_framework::coin::create_coin_conversion_map(&framework); + initialize(&mod_account, b"Fake money", b"FMD", 1, true); coin::register(&mod_account); diff --git a/aptos-move/framework/aptos-framework/sources/managed_coin.spec.move b/aptos-move/framework/aptos-framework/sources/managed_coin.spec.move index e6eafd0904c11..c85b4e70bbedd 100644 --- a/aptos-move/framework/aptos-framework/sources/managed_coin.spec.move +++ b/aptos-move/framework/aptos-framework/sources/managed_coin.spec.move @@ -47,7 +47,7 @@ spec aptos_framework::managed_coin { /// spec module { pragma verify = true; - pragma aborts_if_is_strict; + pragma aborts_if_is_partial; } spec burn( @@ -147,4 +147,16 @@ spec aptos_framework::managed_coin { aborts_if !exists>(account_addr) && !type_info::spec_is_struct(); ensures exists>(account_addr); } + + spec remove_caps(account: &signer): (BurnCapability, FreezeCapability, MintCapability) { + let account_addr = signer::address_of(account); + aborts_if !exists>(account_addr); + ensures !exists>(account_addr); + } + + spec destroy_caps (account: &signer) { + let account_addr = signer::address_of(account); + aborts_if !exists>(account_addr); + ensures !exists>(account_addr); + } } 
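Note: the `managed_coin` changes above give the coin creator two ways out of the `Capabilities<CoinType>` resource: `remove_caps` moves the `BurnCapability`, `FreezeCapability` and `MintCapability` out so they can be stored or re-wrapped elsewhere, while `destroy_caps` destroys all three on the spot. A short usage sketch follows; it is illustrative only, assumes a hypothetical `MyCoin` type published at `0xcafe`, and writes out the type parameters that appear stripped elsewhere in this diff.

```move
module 0xcafe::my_coin {
    use aptos_framework::managed_coin;

    /// Hypothetical coin type; it must be declared at the creator's address.
    struct MyCoin {}

    /// One-time setup: register MyCoin with 8 decimals and supply tracking.
    public entry fun init(creator: &signer) {
        managed_coin::initialize<MyCoin>(creator, b"My Coin", b"MY", 8, true);
    }

    /// Once the supply is final, drop mint/burn/freeze authority for good.
    /// Afterwards Capabilities<MyCoin> no longer exists, so managed_coin::mint
    /// and managed_coin::burn abort with ENO_CAPABILITIES.
    public entry fun renounce(creator: &signer) {
        managed_coin::destroy_caps<MyCoin>(creator);
    }
}
```

If the capabilities should outlive the `managed_coin` wrapper instead of being destroyed, `remove_caps<MyCoin>(creator)` returns the three capabilities so a custom treasury module can take ownership of them.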
diff --git a/aptos-move/framework/aptos-framework/sources/multisig_account.move b/aptos-move/framework/aptos-framework/sources/multisig_account.move index 6ea72d7e0ed0d..8e40a86664461 100644 --- a/aptos-move/framework/aptos-framework/sources/multisig_account.move +++ b/aptos-move/framework/aptos-framework/sources/multisig_account.move @@ -457,14 +457,14 @@ module aptos_framework::multisig_account { #[view] /// Return the id of the last transaction that was executed (successful or failed) or removed. public fun last_resolved_sequence_number(multisig_account: address): u64 acquires MultisigAccount { - let multisig_account_resource = borrow_global_mut(multisig_account); + let multisig_account_resource = borrow_global(multisig_account); multisig_account_resource.last_executed_sequence_number } #[view] /// Return the id of the next transaction created. public fun next_sequence_number(multisig_account: address): u64 acquires MultisigAccount { - let multisig_account_resource = borrow_global_mut(multisig_account); + let multisig_account_resource = borrow_global(multisig_account); multisig_account_resource.next_sequence_number } @@ -472,7 +472,7 @@ module aptos_framework::multisig_account { /// Return a bool tuple indicating whether an owner has voted and if so, whether they voted yes or no. public fun vote( multisig_account: address, sequence_number: u64, owner: address): (bool, bool) acquires MultisigAccount { - let multisig_account_resource = borrow_global_mut(multisig_account); + let multisig_account_resource = borrow_global(multisig_account); assert!( sequence_number > 0 && sequence_number < multisig_account_resource.next_sequence_number, error::invalid_argument(EINVALID_SEQUENCE_NUMBER), @@ -486,7 +486,7 @@ module aptos_framework::multisig_account { #[view] public fun available_transaction_queue_capacity(multisig_account: address): u64 acquires MultisigAccount { - let multisig_account_resource = borrow_global_mut(multisig_account); + let multisig_account_resource = borrow_global(multisig_account); let num_pending_transactions = multisig_account_resource.next_sequence_number - multisig_account_resource.last_executed_sequence_number - 1; if (num_pending_transactions > MAX_PENDING_TRANSACTIONS) { 0 @@ -497,6 +497,30 @@ module aptos_framework::multisig_account { ////////////////////////// Multisig account creation functions /////////////////////////////// + /// Private entry function that creates a new multisig account on top of an existing account. + /// + /// This offers a migration path for an existing account with any type of auth key. + /// + /// Note that this does not revoke auth key-based control over the account. Owners should separately rotate the auth + /// key after they are fully migrated to the new multisig account. Alternatively, they can call + /// create_with_existing_account_and_revoke_auth_key_call instead. + entry fun create_with_existing_account_call( + multisig_account: &signer, + owners: vector
, + num_signatures_required: u64, + metadata_keys: vector, + metadata_values: vector>, + ) acquires MultisigAccount { + create_with_owners_internal( + multisig_account, + owners, + num_signatures_required, + option::none(), + metadata_keys, + metadata_values, + ); + } + /// Creates a new multisig account on top of an existing account. /// /// This offers a migration path for an existing account with a multi-ed25519 auth key (native multisig account). @@ -547,6 +571,41 @@ module aptos_framework::multisig_account { ); } + /// Private entry function that creates a new multisig account on top of an existing account and immediately rotate + /// the origin auth key to 0x0. + /// + /// Note: If the original account is a resource account, this does not revoke all control over it as if any + /// SignerCapability of the resource account still exists, it can still be used to generate the signer for the + /// account. + entry fun create_with_existing_account_and_revoke_auth_key_call( + multisig_account: &signer, + owners: vector
, + num_signatures_required: u64, + metadata_keys: vector, + metadata_values:vector>, + ) acquires MultisigAccount { + create_with_owners_internal( + multisig_account, + owners, + num_signatures_required, + option::none(), + metadata_keys, + metadata_values, + ); + + // Rotate the account's auth key to 0x0, which effectively revokes control via auth key. + let multisig_address = address_of(multisig_account); + account::rotate_authentication_key_internal(multisig_account, ZERO_AUTH_KEY); + // This also needs to revoke any signer capability or rotation capability that exists for the account to + // completely remove all access to the account. + if (account::is_signer_capability_offered(multisig_address)) { + account::revoke_any_signer_capability(multisig_account); + }; + if (account::is_rotation_capability_offered(multisig_address)) { + account::revoke_any_rotation_capability(multisig_account); + }; + } + /// Creates a new multisig account on top of an existing account and immediately rotate the origin auth key to 0x0. /// /// Note: If the original account is a resource account, this does not revoke all control over it as if any @@ -877,14 +936,15 @@ module aptos_framework::multisig_account { new_metadata: multisig_account_resource.metadata, } ) + } else { + emit_event( + &mut multisig_account_resource.metadata_updated_events, + MetadataUpdatedEvent { + old_metadata, + new_metadata: multisig_account_resource.metadata, + } + ); }; - emit_event( - &mut multisig_account_resource.metadata_updated_events, - MetadataUpdatedEvent { - old_metadata, - new_metadata: multisig_account_resource.metadata, - } - ); }; } @@ -981,15 +1041,16 @@ module aptos_framework::multisig_account { approved, } ); + } else { + emit_event( + &mut multisig_account_resource.vote_events, + VoteEvent { + owner: owner_addr, + sequence_number, + approved, + } + ); }; - emit_event( - &mut multisig_account_resource.vote_events, - VoteEvent { - owner: owner_addr, - sequence_number, - approved, - } - ); } /// Generic function that can be used to either approve or reject a multisig transaction @@ -1043,15 +1104,16 @@ module aptos_framework::multisig_account { executor: address_of(owner), } ); + } else { + emit_event( + &mut multisig_account_resource.execute_rejected_transaction_events, + ExecuteRejectedTransactionEvent { + sequence_number, + num_rejections, + executor: owner_addr, + } + ); }; - emit_event( - &mut multisig_account_resource.execute_rejected_transaction_events, - ExecuteRejectedTransactionEvent { - sequence_number, - num_rejections, - executor: owner_addr, - } - ); } /// Remove the next transactions until the final_sequence_number if they have sufficient owner rejections. @@ -1139,16 +1201,17 @@ module aptos_framework::multisig_account { executor, } ); + } else { + emit_event( + &mut multisig_account_resource.execute_transaction_events, + TransactionExecutionSucceededEvent { + sequence_number: multisig_account_resource.last_executed_sequence_number, + transaction_payload, + num_approvals, + executor, + } + ); }; - emit_event( - &mut multisig_account_resource.execute_transaction_events, - TransactionExecutionSucceededEvent { - sequence_number: multisig_account_resource.last_executed_sequence_number, - transaction_payload, - num_approvals, - executor, - } - ); } /// Post-execution cleanup for a failed multisig transaction execution. 
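Note: most of the `multisig_account.move` event hunks above and below fix the same bug: the module event was emitted when `module_event_migration_enabled()` was on, and the legacy handle-based event was then emitted unconditionally, so enabling the feature produced both representations of the same event. The patch moves the legacy `emit_event` into an `else` branch so exactly one is emitted. A minimal sketch of the resulting pattern, using a hypothetical `Ping` module event and a legacy `PingEvent` handle assumed to be published at `@0xcafe`:

```move
module 0xcafe::event_migration_example {
    use std::features;
    use aptos_framework::event::{Self, EventHandle};

    #[event]
    struct Ping has drop, store { value: u64 }

    struct PingEvent has drop, store { value: u64 }

    /// Assumed to be created (via account::new_event_handle) during module setup.
    struct PingEvents has key { handle: EventHandle<PingEvent> }

    fun emit_ping(value: u64) acquires PingEvents {
        if (features::module_event_migration_enabled()) {
            // New-style module event only.
            event::emit(Ping { value });
        } else {
            // Legacy handle-based event only; never both.
            event::emit_event(
                &mut borrow_global_mut<PingEvents>(@0xcafe).handle,
                PingEvent { value }
            );
        };
    }
}
```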
@@ -1172,17 +1235,18 @@ module aptos_framework::multisig_account { execution_error, } ); + } else { + emit_event( + &mut multisig_account_resource.transaction_execution_failed_events, + TransactionExecutionFailedEvent { + executor, + sequence_number: multisig_account_resource.last_executed_sequence_number, + transaction_payload, + num_approvals, + execution_error, + } + ); }; - emit_event( - &mut multisig_account_resource.transaction_execution_failed_events, - TransactionExecutionFailedEvent { - executor, - sequence_number: multisig_account_resource.last_executed_sequence_number, - transaction_payload, - num_approvals, - execution_error, - } - ); } ////////////////////////// Private functions /////////////////////////////// @@ -1204,16 +1268,17 @@ module aptos_framework::multisig_account { approved: true, } ); + } else { + emit_event( + &mut multisig_account_resource.vote_events, + VoteEvent { + owner: executor, + sequence_number, + approved: true, + } + ); }; num_approvals = num_approvals + 1; - emit_event( - &mut multisig_account_resource.vote_events, - VoteEvent { - owner: executor, - sequence_number, - approved: true, - } - ); }; num_approvals @@ -1251,11 +1316,12 @@ module aptos_framework::multisig_account { emit( CreateTransaction { multisig_account: multisig_account, creator, sequence_number, transaction } ); + } else { + emit_event( + &mut multisig_account_resource.create_transaction_events, + CreateTransactionEvent { creator, sequence_number, transaction }, + ); }; - emit_event( - &mut multisig_account_resource.create_transaction_events, - CreateTransactionEvent { creator, sequence_number, transaction }, - ); } fun create_multisig_account(owner: &signer): (signer, SignerCapability) { @@ -1383,11 +1449,12 @@ module aptos_framework::multisig_account { ); if (std::features::module_event_migration_enabled()) { emit(AddOwners { multisig_account: multisig_address, owners_added: new_owners }); + } else { + emit_event( + &mut multisig_account_ref_mut.add_owners_events, + AddOwnersEvent { owners_added: new_owners } + ); }; - emit_event( - &mut multisig_account_ref_mut.add_owners_events, - AddOwnersEvent { owners_added: new_owners } - ); }; // If owners to remove provided, try to remove them. if (vector::length(&owners_to_remove) > 0) { @@ -1409,11 +1476,12 @@ module aptos_framework::multisig_account { emit( RemoveOwners { multisig_account: multisig_address, owners_removed } ); + } else { + emit_event( + &mut multisig_account_ref_mut.remove_owners_events, + RemoveOwnersEvent { owners_removed } + ); }; - emit_event( - &mut multisig_account_ref_mut.remove_owners_events, - RemoveOwnersEvent { owners_removed } - ); } }; // If new signature count provided, try to update count. @@ -1438,14 +1506,15 @@ module aptos_framework::multisig_account { new_num_signatures_required, } ); - }; - emit_event( - &mut multisig_account_ref_mut.update_signature_required_events, - UpdateSignaturesRequiredEvent { - old_num_signatures_required, - new_num_signatures_required, - } - ); + } else { + emit_event( + &mut multisig_account_ref_mut.update_signature_required_events, + UpdateSignaturesRequiredEvent { + old_num_signatures_required, + new_num_signatures_required, + } + ); + } } }; // Verify number of owners. 
@@ -1688,6 +1757,26 @@ module aptos_framework::multisig_account { ); } + #[test] + public entry fun test_create_multisig_account_on_top_of_existing_with_signer() + acquires MultisigAccount { + setup(); + + let multisig_address = @0xabc; + create_account(multisig_address); + + let expected_owners = vector[@0x123, @0x124, @0x125]; + create_with_existing_account_call( + &create_signer(multisig_address), + expected_owners, + 2, + vector[], + vector[], + ); + assert_multisig_account_exists(multisig_address); + assert!(owners(multisig_address) == expected_owners, 0); + } + #[test] public entry fun test_create_multisig_account_on_top_of_existing_multi_ed25519_account() acquires MultisigAccount { @@ -1721,6 +1810,34 @@ module aptos_framework::multisig_account { assert!(owners(multisig_address) == expected_owners, 0); } + #[test] + public entry fun test_create_multisig_account_on_top_of_existing_and_revoke_auth_key_with_signer() + acquires MultisigAccount { + setup(); + + let multisig_address = @0xabc; + create_account(multisig_address); + + // Create both a signer capability and rotation capability offers + account::set_rotation_capability_offer(multisig_address, @0x123); + account::set_signer_capability_offer(multisig_address, @0x123); + + let expected_owners = vector[@0x123, @0x124, @0x125]; + create_with_existing_account_and_revoke_auth_key_call( + &create_signer(multisig_address), + expected_owners, + 2, + vector[], + vector[], + ); + assert_multisig_account_exists(multisig_address); + assert!(owners(multisig_address) == expected_owners, 0); + assert!(account::get_authentication_key(multisig_address) == ZERO_AUTH_KEY, 1); + // Verify that all capability offers have been wiped. + assert!(!account::is_rotation_capability_offered(multisig_address), 2); + assert!(!account::is_signer_capability_offered(multisig_address), 3); + } + #[test] public entry fun test_create_multisig_account_on_top_of_existing_multi_ed25519_account_and_revoke_auth_key() acquires MultisigAccount { diff --git a/aptos-move/framework/aptos-framework/sources/multisig_account.spec.move b/aptos-move/framework/aptos-framework/sources/multisig_account.spec.move index 1e2b60c3724ed..2acfabe1c088d 100644 --- a/aptos-move/framework/aptos-framework/sources/multisig_account.spec.move +++ b/aptos-move/framework/aptos-framework/sources/multisig_account.spec.move @@ -207,7 +207,8 @@ spec aptos_framework::multisig_account { } spec get_next_multisig_account_address(creator: address): address { - aborts_if !exists(creator); + //pragma aborts_if_is_partial; + //aborts_if !exists(creator); let owner_nonce = global(creator).sequence_number; } diff --git a/aptos-move/framework/aptos-framework/sources/native_bridge.move b/aptos-move/framework/aptos-framework/sources/native_bridge.move deleted file mode 100644 index 4f8e5c2fca1d5..0000000000000 --- a/aptos-move/framework/aptos-framework/sources/native_bridge.move +++ /dev/null @@ -1,436 +0,0 @@ -module aptos_framework::native_bridge { - use aptos_std::smart_table::SmartTable; - use aptos_framework::ethereum::EthereumAddress; - use aptos_framework::event::EventHandle; - use aptos_framework::aptos_coin::AptosCoin; - use aptos_framework::coin::{BurnCapability, MintCapability}; - use aptos_framework::fungible_asset::{BurnRef, MintRef}; - - const ETRANSFER_ALREADY_PROCESSED: u64 = 1; - const EINVALID_BRIDGE_TRANSFER_ID: u64 = 2; - const EEVENT_NOT_FOUND: u64 = 3; - const EINVALID_NONCE: u64 = 4; - const EINVALID_AMOUNT: u64 = 5; - const ENONCE_NOT_FOUND: u64 = 6; - const EZERO_AMOUNT: u64 = 7; - const 
ENATIVE_BRIDGE_NOT_ENABLED: u64 = 8; - const EINCORRECT_NONCE: u64 = 9; - const EID_NOT_FOUND: u64 = 10; - const EINVALID_BRIDGE_RELAYER: u64 = 11; - const ESAME_FEE: u64 = 0x2; - const EINVALID_VALUE: u64 = 0x3; - const ERATE_LIMIT_EXCEEDED: u64 = 0x4; - - friend aptos_framework::genesis; - - #[event] - /// Event emitted when the bridge relayer is updated. - struct BridgeConfigRelayerUpdated has store, drop { - old_relayer: address, - new_relayer: address, - } - - #[event] - /// An event triggered upon change of bridgefee - struct BridgeFeeChangedEvent has store, drop { - old_bridge_fee: u64, - new_bridge_fee: u64, - } - - #[event] - /// An event triggered upon change of insurance budget divider - struct BridgeInsuranceBudgetDividerChangedEvent has store, drop { - old_insurance_budget_divider: u64, - new_insurance_budget_divider: u64, - } - - #[event] - /// An event triggered upon change of insurance fund - struct BridgeInsuranceFundChangedEvent has store, drop { - old_insurance_fund: address, - new_insurance_fund: address, - } - - #[event] - /// An event triggered upon initiating a bridge transfer - struct BridgeTransferInitiatedEvent has store, drop { - bridge_transfer_id: vector, - initiator: address, - recipient: vector, - amount: u64, - nonce: u64, - } - - #[event] - /// An event triggered upon completing a bridge transfer - struct BridgeTransferCompletedEvent has store, drop { - bridge_transfer_id: vector, - initiator: vector, - recipient: address, - amount: u64, - nonce: u64, - } - - /// This struct will store the event handles for bridge events. - struct BridgeEvents has key, store { - bridge_transfer_initiated_events: EventHandle, - bridge_transfer_completed_events: EventHandle, - } - - struct AptosCoinBurnCapability has key { - burn_cap: BurnCapability, - } - - struct AptosCoinMintCapability has key { - mint_cap: MintCapability, - } - - struct AptosFABurnCapabilities has key { - burn_ref: BurnRef, - } - - struct AptosFAMintCapabilities has key { - burn_ref: MintRef, - } - - /// A nonce to ensure the uniqueness of bridge transfers - struct Nonce has key { - value: u64 - } - - struct OutboundRateLimitBudget has key, store { - day: SmartTable, - } - - struct InboundRateLimitBudget has key, store { - day: SmartTable, - } - - /// A smart table wrapper - struct SmartTableWrapper has key, store { - inner: SmartTable, - } - - /// Details on the outbound transfer - struct OutboundTransfer has store, copy { - bridge_transfer_id: vector, - initiator: address, - recipient: EthereumAddress, - amount: u64, - } - - struct BridgeConfig has key { - bridge_relayer: address, - insurance_fund: address, - insurance_budget_divider: u64, - bridge_fee: u64, - } - - /// Initializes the module and stores the `EventHandle`s in the resource. - public fun initialize(_aptos_framework: &signer) { - - } - - /// Converts a u64 to a 32-byte vector. - /// - /// @param value The u64 value to convert. - /// @return A 32-byte vector containing the u64 value in little-endian order. - /// - /// How BCS works: https://github.com/zefchain/bcs?tab=readme-ov-file#booleans-and-integers - /// - /// @example: a u64 value 0x12_34_56_78_ab_cd_ef_00 is converted to a 32-byte vector: - /// [0x00, 0x00, ..., 0x00, 0x12, 0x34, 0x56, 0x78, 0xab, 0xcd, 0xef, 0x00] - public(friend) fun normalize_u64_to_32_bytes(_value: &u64): vector { - abort ENATIVE_BRIDGE_NOT_ENABLED - } - - /// Checks if a bridge transfer ID is associated with an inbound nonce. - /// @param bridge_transfer_id The bridge transfer ID. 
- /// @return `true` if the ID is associated with an existing inbound nonce, `false` otherwise. - public(friend) fun is_inbound_nonce_set(_bridge_transfer_id: vector): bool { - abort ENATIVE_BRIDGE_NOT_ENABLED - } - - /// Creates bridge transfer details with validation. - /// - /// @param initiator The initiating party of the transfer. - /// @param recipient The receiving party of the transfer. - /// @param amount The amount to be transferred. - /// @param nonce The unique nonce for the transfer. - /// @return A `BridgeTransferDetails` object. - /// @abort If the amount is zero or locks are invalid. - public(friend) fun create_details(_initiator: address, _recipient: EthereumAddress, _amount: u64, _nonce: u64) - : OutboundTransfer { - abort ENATIVE_BRIDGE_NOT_ENABLED - } - - /// Record details of an initiated transfer for quick lookup of details, mapping bridge transfer ID to transfer details - /// - /// @param bridge_transfer_id Bridge transfer ID. - /// @param details The bridge transfer details - public(friend) fun add(_nonce: u64, _details: OutboundTransfer) { - abort ENATIVE_BRIDGE_NOT_ENABLED - } - - /// Record details of a completed transfer, mapping bridge transfer ID to inbound nonce - /// - /// @param bridge_transfer_id Bridge transfer ID. - /// @param details The bridge transfer details - public(friend) fun set_bridge_transfer_id_to_inbound_nonce(_bridge_transfer_id: vector, _inbound_nonce: u64) { - abort ENATIVE_BRIDGE_NOT_ENABLED - } - - /// Asserts that the bridge transfer ID is valid. - /// - /// @param bridge_transfer_id The bridge transfer ID to validate. - /// @abort If the ID is invalid. - public(friend) fun assert_valid_bridge_transfer_id(_bridge_transfer_id: &vector) { - abort ENATIVE_BRIDGE_NOT_ENABLED - } - - /// Generates a unique outbound bridge transfer ID based on transfer details and nonce. - /// - /// @param details The bridge transfer details. - /// @return The generated bridge transfer ID. - public(friend) fun bridge_transfer_id(_initiator: address, _recipient: EthereumAddress, _amount: u64, _nonce: u64) : vector { - abort ENATIVE_BRIDGE_NOT_ENABLED - } - - #[view] - /// Retrieves the address of the current bridge relayer. - /// - /// @return The address of the current bridge relayer. - public fun bridge_relayer(): address { - abort ENATIVE_BRIDGE_NOT_ENABLED - } - - #[view] - /// Retrieves the address of the current insurance fund. - /// - /// @return The address of the current insurance fund. - public fun insurance_fund(): address { - abort ENATIVE_BRIDGE_NOT_ENABLED - } - - #[view] - /// Retrieves the current insurance budget divider. - /// - /// @return The current insurance budget divider. - public fun insurance_budget_divider(): u64 { - abort ENATIVE_BRIDGE_NOT_ENABLED - } - - #[view] - /// Retrieves the current bridge fee. - /// - /// @return The current bridge fee. - public fun bridge_fee(): u64 { - abort ENATIVE_BRIDGE_NOT_ENABLED - } - - #[view] - /// Gets the bridge transfer details (`OutboundTransfer`) from the given nonce. - /// @param nonce The nonce of the bridge transfer. - /// @return The `OutboundTransfer` struct containing the transfer details. - /// @abort If the nonce is not found in the smart table. - public fun get_bridge_transfer_details_from_nonce(_nonce: u64): OutboundTransfer { - abort ENATIVE_BRIDGE_NOT_ENABLED - } - - #[view] - /// Gets inbound `nonce` from `bridge_transfer_id` - /// @param bridge_transfer_id The ID bridge transfer. - /// @return the nonce - /// @abort If the nonce is not found in the smart table. 
- public fun get_inbound_nonce_from_bridge_transfer_id(_bridge_transfer_id: vector): u64 { - abort ENATIVE_BRIDGE_NOT_ENABLED - } - - /// Increment and get the current nonce - fun increment_and_get_nonce(): u64 { - abort ENATIVE_BRIDGE_NOT_ENABLED - } - - #[test_only] - /// Initializes the native bridge for testing purposes - /// - /// @param aptos_framework The signer representing the Aptos framework. - public fun initialize_for_test(_aptos_framework: &signer) { - abort ENATIVE_BRIDGE_NOT_ENABLED - } - - /// Stores the burn capability for AptosCoin, converting to a fungible asset reference if the feature is enabled. - /// - /// @param aptos_framework The signer representing the Aptos framework. - /// @param burn_cap The burn capability for AptosCoin. - public fun store_aptos_coin_burn_cap(_aptos_framework: &signer, _burn_cap: BurnCapability) { - abort ENATIVE_BRIDGE_NOT_ENABLED - } - - /// Stores the mint capability for AptosCoin. - /// - /// @param aptos_framework The signer representing the Aptos framework. - /// @param mint_cap The mint capability for AptosCoin. - public fun store_aptos_coin_mint_cap(_aptos_framework: &signer, _mint_cap: MintCapability) { - abort ENATIVE_BRIDGE_NOT_ENABLED - } - - /// Mints a specified amount of AptosCoin to a recipient's address. - /// - /// @param core_resource The signer representing the core resource account. - /// @param recipient The address of the recipient to mint coins to. - /// @param amount The amount of AptosCoin to mint. - public fun mint_to(_aptos_framework: &signer, _recipient: address, _amount: u64) { - abort ENATIVE_BRIDGE_NOT_ENABLED - } - - /// Mints a specified amount of AptosCoin to a recipient's address. - /// - /// @param recipient The address of the recipient to mint coins to. - /// @param amount The amount of AptosCoin to mint. - /// @abort If the mint capability is not available. - public(friend) fun mint(_recipient: address, _amount: u64) { - abort ENATIVE_BRIDGE_NOT_ENABLED - } - - /// Mints a specified amount of AptosCoin to a recipient's address. - /// - /// @param recipient The address of the recipient to mint coins to. - /// @param amount The amount of AptosCoin to mint. - fun mint_internal(_recipient: address, _amount: u64) { - abort ENATIVE_BRIDGE_NOT_ENABLED - } - - /// Burns a specified amount of AptosCoin from an address. - /// - /// @param core_resource The signer representing the core resource account. - /// @param from The address from which to burn AptosCoin. - /// @param amount The amount of AptosCoin to burn. - /// @abort If the burn capability is not available. - public fun burn_from(_aptos_framework: &signer, _from: address, _amount: u64) { - abort ENATIVE_BRIDGE_NOT_ENABLED - } - - /// Burns a specified amount of AptosCoin from an address. - /// - /// @param from The address from which to burn AptosCoin. - /// @param amount The amount of AptosCoin to burn. - /// @abort If the burn capability is not available. - public(friend) fun burn(_from: address, _amount: u64) { - abort ENATIVE_BRIDGE_NOT_ENABLED - } - - /// Burns a specified amount of AptosCoin from an address. - /// - /// @param from The address from which to burn AptosCoin. - /// @param amount The amount of AptosCoin to burn. 
- fun burn_internal(_from: address, _amount: u64) { - abort ENATIVE_BRIDGE_NOT_ENABLED - } - - /// Initiate a bridge transfer of MOVE from Movement to Ethereum - /// Anyone can initiate a bridge transfer from the source chain - /// The amount is burnt from the initiator and the module-level nonce is incremented - /// @param initiator The initiator's Ethereum address as a vector of bytes. - /// @param recipient The address of the recipient on the Aptos blockchain. - /// @param amount The amount of assets to be locked. - public entry fun initiate_bridge_transfer( - _initiator: &signer, - _recipient: vector, - _amount: u64 - ) { - abort ENATIVE_BRIDGE_NOT_ENABLED - } - - /// Completes a bridge transfer on the destination chain. - /// - /// @param caller The signer representing the bridge relayer. - /// @param initiator The initiator's Ethereum address as a vector of bytes. - /// @param bridge_transfer_id The unique identifier for the bridge transfer. - /// @param recipient The address of the recipient on the Aptos blockchain. - /// @param amount The amount of assets to be locked. - /// @param nonce The unique nonce for the transfer. - /// @abort If the caller is not the bridge relayer or the transfer has already been processed. - public entry fun complete_bridge_transfer( - _caller: &signer, - _bridge_transfer_id: vector, - _initiator: vector, - _recipient: address, - _amount: u64, - _nonce: u64 - ) { - abort ENATIVE_BRIDGE_NOT_ENABLED - } - - /// Charge bridge fee to the initiate bridge transfer. - /// - /// @param initiator The signer representing the initiator. - /// @param amount The amount to be charged. - /// @return The new amount after deducting the bridge fee. - fun charge_bridge_fee(_amount: u64): u64 { - abort ENATIVE_BRIDGE_NOT_ENABLED - } - - /// Updates the bridge relayer, requiring governance validation. - /// - /// @param aptos_framework The signer representing the Aptos framework. - /// @param new_relayer The new address to be set as the bridge relayer. - /// @abort If the current relayer is the same as the new relayer. - public fun update_bridge_relayer(_aptos_framework: &signer, _new_relayer: address) { - abort ENATIVE_BRIDGE_NOT_ENABLED - } - - /// Updates the bridge fee, requiring relayer validation. - /// - /// @param relayer The signer representing the Relayer. - /// @param new_bridge_fee The new bridge fee to be set. - /// @abort If the new bridge fee is the same as the old bridge fee. - public entry fun update_bridge_fee(_relayer: &signer, _new_bridge_fee: u64) { - abort ENATIVE_BRIDGE_NOT_ENABLED - } - - /// Updates the insurance fund, requiring governance validation. - /// - /// @param aptos_framework The signer representing the Aptos framework. - /// @param new_insurance_fund The new insurance fund to be set. - /// @abort If the new insurance fund is the same as the old insurance fund. - public entry fun update_insurance_fund(_aptos_framework: &signer, _new_insurance_fund: address) { - abort ENATIVE_BRIDGE_NOT_ENABLED - } - - /// Updates the insurance budget divider, requiring governance validation. - /// - /// @param aptos_framework The signer representing the Aptos framework. - /// @param new_insurance_budget_divider The new insurance budget divider to be set. - /// @abort If the new insurance budget divider is the same as the old insurance budget divider. 
- public entry fun update_insurance_budget_divider(_aptos_framework: &signer, _new_insurance_budget_divider: u64) { - abort ENATIVE_BRIDGE_NOT_ENABLED - } - - /// Asserts that the caller is the current bridge relayer. - /// - /// @param caller The signer whose authority is being checked. - /// @abort If the caller is not the current bridge relayer. - public(friend) fun assert_is_caller_relayer(_caller: &signer) { - abort ENATIVE_BRIDGE_NOT_ENABLED - } - - /// Asserts that the rate limit budget is not exceeded. - /// - /// @param amount The amount to be transferred. - fun assert_outbound_rate_limit_budget_not_exceeded(_amount: u64) { - abort ENATIVE_BRIDGE_NOT_ENABLED - } - - /// Asserts that the rate limit budget is not exceeded. - /// - /// @param amount The amount to be transferred. - fun assert_inbound_rate_limit_budget_not_exceeded(_amount: u64) { - abort ENATIVE_BRIDGE_NOT_ENABLED - } - - /// Test serialization of u64 to 32 bytes - fun test_normalize_u64_to_32_bytes_helper(_x: u64, _expected: vector) { - abort ENATIVE_BRIDGE_NOT_ENABLED - } -} diff --git a/aptos-move/framework/aptos-framework/sources/native_bridge.spec.move b/aptos-move/framework/aptos-framework/sources/native_bridge.spec.move deleted file mode 100644 index 70df911b164fc..0000000000000 --- a/aptos-move/framework/aptos-framework/sources/native_bridge.spec.move +++ /dev/null @@ -1,281 +0,0 @@ -// spec aptos_framework::native_bridge { - -// // use std::features; -// // use aptos_framework::coin; - -// // spec plus1 { -// // pragma aborts_if_is_partial = false; -// // aborts_if !exists(@aptos_framework); -// // aborts_if global(@aptos_framework).v + 1 > MAX_U64; - -// // modifies global(@aptos_framework); -// // ensures result == old(global(@aptos_framework).v) + 1; -// // ensures global(@aptos_framework).v == old(global(@aptos_framework).v) + 1; -// // } - -// spec increment_and_get_nonce { -// pragma aborts_if_is_partial = true; -// modifies global(@aptos_framework); -// aborts_if !exists(@aptos_framework); -// aborts_if global(@aptos_framework).value + 1 > MAX_U64; -// ensures result == old(global(@aptos_framework).value) + 1; -// ensures global(@aptos_framework).value == old(global(@aptos_framework).value) + 1; -// } - -// // spec increment_and_get_nonce_at { -// // // pragma aborts_if_is_partial = true; -// // modifies global(a); -// // aborts_if !exists(a); -// // aborts_if global(a).value + 1 > MAX_U64; - -// // // aborts_with EXECUTION_FAILURE; -// // // ensures global(a).value == old(global(a).value) + 1; -// // } - -// // spec initialize(aptos_framework: &signer) { -// // aborts_if !system_addresses::is_aptos_framework_address(signer::address_of(aptos_framework)); -// // aborts_if exists(signer::address_of(aptos_framework)); -// // aborts_if exists(signer::address_of(aptos_framework)); - -// // ensures exists(signer::address_of(aptos_framework)); -// // ensures global(signer::address_of(aptos_framework)).value == 1; - -// // ensures exists(signer::address_of(aptos_framework)); -// // ensures -// // global(signer::address_of(aptos_framework)) -// // .bridge_transfer_initiated_events.counter == 0; -// // ensures -// // global(signer::address_of(aptos_framework)) -// // .bridge_transfer_completed_events.counter == 0; -// // } - -// // spec increment_and_get_nonce { -// // aborts_if !exists(@aptos_framework); - -// // ensures global(@aptos_framework).value == old(global(@aptos_framework).value) + 1; -// // ensures result == global(@aptos_framework).value; -// // } - -// // spec initiate_bridge_transfer( -// // 
initiator: &signer, -// // recipient: vector, -// // amount: u64 -// // ) { -// // aborts_if amount == 0; -// // aborts_if !exists(@aptos_framework); -// // aborts_if !exists(@aptos_framework); - -// // ensures global(@aptos_framework).value == old(global(@aptos_framework).value) + 1; - -// // ensures -// // global(@aptos_framework).bridge_transfer_initiated_events.counter == -// // old( -// // global(@aptos_framework).bridge_transfer_initiated_events.counter -// // ) + 1; -// // } - -// // spec complete_bridge_transfer( -// // caller: &signer, -// // bridge_transfer_id: vector, -// // initiator: vector, -// // recipient: address, -// // amount: u64, -// // nonce: u64 -// // ) { -// // // Abort if the caller is not a relayer -// // aborts_if !exists(@aptos_framework); -// // aborts_if global(@aptos_framework).bridge_relayer != signer::address_of(caller); - -// // // Abort if the bridge transfer ID is already associated with an incoming nonce -// // aborts_if native_bridge_store::is_incoming_nonce_set(bridge_transfer_id); - -// // // Abort if the `BridgeEvents` resource does not exist -// // aborts_if !exists(@aptos_framework); - -// // // Ensure the bridge transfer ID is associated with an incoming nonce after execution -// // ensures native_bridge_store::is_incoming_nonce_set(bridge_transfer_id); - -// // // Ensure the event counter is incremented by 1 -// // ensures -// // global(@aptos_framework).bridge_transfer_completed_events.counter == -// // old( -// // global(@aptos_framework).bridge_transfer_completed_events.counter -// // ) + 1; -// // } -// } - -// spec aptos_framework::native_bridge_core { - -// spec initialize(aptos_framework: &signer) { -// pragma aborts_if_is_partial = true; - -// aborts_if !system_addresses::is_aptos_framework_address(signer::address_of(aptos_framework)); -// // aborts_if exists(@aptos_framework); -// // aborts_if exists(@aptos_framework); - -// // ensures exists(@aptos_framework); -// // ensures exists(@aptos_framework); -// } -// // spec store_aptos_coin_burn_cap(aptos_framework: &signer, burn_cap: BurnCapability) { -// // aborts_if !system_addresses::is_aptos_framework_address(signer::address_of(aptos_framework)); -// // aborts_if exists(@aptos_framework); - -// // ensures exists(@aptos_framework); -// // } - -// // spec store_aptos_coin_mint_cap(aptos_framework: &signer, mint_cap: MintCapability) { -// // aborts_if !system_addresses::is_aptos_framework_address(signer::address_of(aptos_framework)); -// // aborts_if exists(@aptos_framework); - -// // ensures exists(@aptos_framework); -// // } - -// // spec mint(recipient: address, amount: u64) { -// // aborts_if !exists(@aptos_framework); -// // aborts_if amount == 0; - -// // ensures coin::balance(recipient) == old(coin::balance(recipient)) + amount; -// // } - -// // spec burn(from: address, amount: u64) { -// // aborts_if !exists(@aptos_framework); -// // aborts_if coin::balance(from) < amount; - -// // ensures coin::balance(from) == old(coin::balance(from)) - amount; -// // } -// } - -// spec aptos_framework::native_bridge_store { - -// spec module { -// axiom forall x: u64: len(bcs::to_bytes(x)) == 8; -// axiom forall x: u256: len(bcs::to_bytes(x)) == 32; -// } - -// // req1. never aborts -// // req2. 
returns a 32-byte vector -// spec normalize_u64_to_32_bytes { -// aborts_if false; -// ensures len(result) == 32; -// } - - -// // spec bcs_u64 { -// // aborts_if false; -// // ensures len(result) == 8; -// // } - -// // spec ascii_hex_to_u8 { -// // requires ch >= 0x30 && ch <= 0x39 || ch >= 0x41 && ch <= 0x46 || ch >= 0x61 && ch <= 0x66; -// // } - -// // spec initialize(aptos_framework: &signer) { -// // aborts_if !system_addresses::is_aptos_framework_address(signer::address_of(aptos_framework)); - -// // ensures exists>(@aptos_framework); -// // ensures exists, u64>>(@aptos_framework); -// // } - -// // spec is_incoming_nonce_set(bridge_transfer_id: vector): bool { -// // ensures result == exists, u64>>(@aptos_framework) -// // && smart_table::spec_contains( -// // global, u64>>(@aptos_framework).inner, -// // bridge_transfer_id -// // ); -// // } - -// // spec create_details( -// // initiator: address, -// // recipient: EthereumAddress, -// // amount: u64, -// // nonce: u64 -// // ): OutboundTransfer { -// // aborts_if amount == 0; - -// // ensures result.bridge_transfer_id == bridge_transfer_id( -// // initiator, -// // recipient, -// // amount, -// // nonce -// // ); -// // ensures result.initiator == initiator; -// // ensures result.recipient == recipient; -// // ensures result.amount == amount; -// // } - -// // spec add(nonce: u64, details: OutboundTransfer) { -// // aborts_if !exists>(@aptos_framework); -// // aborts_if smart_table::spec_contains( -// // global>(@aptos_framework).inner, -// // nonce -// // ); - -// // ensures smart_table::spec_contains( -// // global>(@aptos_framework).inner, -// // nonce -// // ); -// // ensures smart_table::spec_len( -// // global>(@aptos_framework).inner -// // ) == old(smart_table::spec_len( -// // global>(@aptos_framework).inner -// // )) + 1; -// // } - -// // spec set_bridge_transfer_id_to_inbound_nonce( -// // bridge_transfer_id: vector, -// // inbound_nonce: u64 -// // ) { -// // aborts_if !exists, u64>>(@aptos_framework); - -// // ensures smart_table::spec_contains( -// // global, u64>>(@aptos_framework).inner, -// // bridge_transfer_id -// // ); -// // } -// /* -// spec bridge_transfer_id( -// initiator: address, -// recipient: EthereumAddress, -// amount: u64, -// nonce: u64 -// ): vector { -// let combined_bytes = vec_empty(); -// combined_bytes = vector::append(combined_bytes, bcs::to_bytes(&initiator)); -// combined_bytes = vector::append(combined_bytes, bcs::to_bytes(&recipient)); -// combined_bytes = vector::append(combined_bytes, bcs::to_bytes(&amount)); -// combined_bytes = vector::append(combined_bytes, bcs::to_bytes(&nonce)); - -// ensures result == aptos_std::aptos_hash::keccak256(combined_bytes); -// } -// */ -// } - -// // spec aptos_framework::native_bridge_configuration { - -// // spec initialize(aptos_framework: &signer) { -// // aborts_if !system_addresses::is_aptos_framework_address(signer::address_of(aptos_framework)); -// // aborts_if exists(signer::address_of(aptos_framework)); - -// // ensures exists(signer::address_of(aptos_framework)); -// // ensures global(signer::address_of(aptos_framework)).bridge_relayer == signer::address_of(aptos_framework); -// // } - -// // spec update_bridge_relayer(aptos_framework: &signer, new_relayer: address) { -// // aborts_if !system_addresses::is_aptos_framework_address(signer::address_of(aptos_framework)); -// // aborts_if !exists(signer::address_of(aptos_framework)); -// // aborts_if global(signer::address_of(aptos_framework)).bridge_relayer == new_relayer; - -// // ensures 
global(signer::address_of(aptos_framework)).bridge_relayer == new_relayer;
-// // }
-
-// // spec bridge_relayer(): address {
-// // aborts_if !exists(@aptos_framework);
-
-// // ensures result == global(@aptos_framework).bridge_relayer;
-// // }
-
-// // spec assert_is_caller_relayer(caller: &signer) {
-// // aborts_if !exists(@aptos_framework);
-// // aborts_if global(@aptos_framework).bridge_relayer != signer::address_of(caller);
-// // }
-// // }
diff --git a/aptos-move/framework/aptos-framework/sources/nonce_validation.move b/aptos-move/framework/aptos-framework/sources/nonce_validation.move
new file mode 100644
index 0000000000000..f9ff0eb3ab2de
--- /dev/null
+++ b/aptos-move/framework/aptos-framework/sources/nonce_validation.move
@@ -0,0 +1,267 @@
+module aptos_framework::nonce_validation {
+ use aptos_std::table::{Self, Table};
+ use aptos_std::timestamp;
+ use aptos_std::big_ordered_map::{Self, BigOrderedMap};
+ use aptos_std::aptos_hash::sip_hash_from_value;
+ use aptos_std::error;
+ use aptos_framework::system_addresses;
+ friend aptos_framework::genesis;
+ friend aptos_framework::transaction_validation;
+
+
+ const NUM_BUCKETS: u64 = 50000;
+
+ // After a transaction expires, we wait for NONCE_REPLAY_PROTECTION_OVERLAP_INTERVAL_SECS seconds
+ // before garbage collecting the transaction from the nonce history.
+ // We maintain an invariant that two transactions with the same (address, nonce) pair cannot be stored
+ // in the nonce history if their transaction expiration times are less than
+ // `NONCE_REPLAY_PROTECTION_OVERLAP_INTERVAL_SECS` seconds apart.
+ const NONCE_REPLAY_PROTECTION_OVERLAP_INTERVAL_SECS: u64 = 65;
+
+
+ // Each time we check if an (address, nonce) pair can be inserted into nonce history,
+ // we first try to garbage collect expired nonces in the bucket. This is a limit on the number of nonces
+ // we try to garbage collect in a single call.
+ const MAX_ENTRIES_GARBAGE_COLLECTED_PER_CALL: u64 = 5;
+
+ // Nonce history resource does not exist.
+ const E_NONCE_HISTORY_DOES_NOT_EXIST: u64 = 1001;
+
+ // Transaction expiration time is too far in the future.
+ const ETRANSACTION_EXPIRATION_TOO_FAR_IN_FUTURE: u64 = 1002;
+
+
+ // An orderless transaction is a transaction that doesn't have a sequence number.
+ // Orderless transactions instead contain a nonce to prevent replay attacks.
+ // If the incoming transaction has the same (address, nonce) pair as a previous unexpired transaction, it is rejected.
+ // The nonce history stores the (address, nonce, txn expiration time) values of all unexpired transactions.
+ // It is consulted during transaction validation to check whether an incoming transaction is valid.
+ struct NonceHistory has key {
+ // Key = sip_hash(NonceKey) % NUM_BUCKETS
+ // Value = Bucket
+ nonce_table: Table,
+ // Used to facilitate prefilling the nonce_table with empty buckets
+ // one by one using the `add_nonce_buckets` method.
+ // This is the next_key to prefill with an empty bucket
+ next_key: u64,
+ }
+
+ // The bucket stores (address, nonce, txn expiration time) tuples.
+ // All the entries in the bucket contain the same hash(address, nonce) % NUM_BUCKETS.
+ // The first big ordered map in the bucket stores (expiration time, address, nonce) -> true.
+ // The second big ordered map in the bucket stores (address, nonce) -> expiration time.
+ // Both the maps store the same data, just in a different format.
+ // As the key in the first big ordered map starts with expiration time, it's easy to figure out which + // entries have expired at the current time. The first big ordered map helps with easy garbage collection. + // The second big ordered map helps with checking if the given (address, nonce) pair exists in the bucket. + // An (address, nonce) pair is guaranteed to be unique in both the big ordered maps. Two transactions with + // the same (address, nonce) pair cannot be stored at the same time. + struct Bucket has store { + // The first big ordered map in the bucket stores (expiration time, address, nonce) -> true. + nonces_ordered_by_exp_time: BigOrderedMap, + // The second big ordered map in the bucket stores (address, nonce) -> expiration time. + nonce_to_exp_time_map: BigOrderedMap, + } + + struct NonceKeyWithExpTime has copy, drop, store { + txn_expiration_time: u64, + sender_address: address, + nonce: u64, + } + + struct NonceKey has copy, drop, store { + sender_address: address, + nonce: u64, + } + + public(friend) fun initialize(aptos_framework: &signer) { + initialize_nonce_table(aptos_framework); + } + + public entry fun initialize_nonce_table(aptos_framework: &signer) { + system_addresses::assert_aptos_framework(aptos_framework); + if (!exists(@aptos_framework)) { + let table = table::new(); + let nonce_history = NonceHistory { + nonce_table: table, + next_key: 0, + }; + move_to(aptos_framework, nonce_history); + }; + } + + fun empty_bucket(pre_allocate_slots: bool): Bucket { + let bucket = Bucket { + nonces_ordered_by_exp_time: big_ordered_map::new_with_reusable(), + nonce_to_exp_time_map: big_ordered_map::new_with_reusable(), + }; + + if (pre_allocate_slots) { + // Initiating big ordered maps with 5 pre-allocated storage slots. + // (expiration time, address, nonce) is together 48 bytes. + // A 4 KB storage slot can store 80+ such tuples. + // The 5 slots should be more than enough for the current use case. + bucket.nonces_ordered_by_exp_time.allocate_spare_slots(5); + bucket.nonce_to_exp_time_map.allocate_spare_slots(5); + }; + bucket + } + + // This method is used to prefill the nonce_table with empty buckets one by one. + public entry fun add_nonce_buckets(count: u64) acquires NonceHistory { + assert!(exists(@aptos_framework), error::invalid_state(E_NONCE_HISTORY_DOES_NOT_EXIST)); + let nonce_history = &mut NonceHistory[@aptos_framework]; + for (i in 0..count) { + if (nonce_history.next_key <= NUM_BUCKETS) { + if (!nonce_history.nonce_table.contains(nonce_history.next_key)) { + nonce_history.nonce_table.add( + nonce_history.next_key, + empty_bucket(true) + ); + }; + nonce_history.next_key = nonce_history.next_key + 1; + } + } + } + + // Returns true if the input (address, nonce) pair doesn't exist in the nonce history, and inserted into nonce history successfully. + // Returns false if the input (address, nonce) pair already exists in the nonce history. + public(friend) fun check_and_insert_nonce( + sender_address: address, + nonce: u64, + txn_expiration_time: u64, + ): bool acquires NonceHistory { + assert!(exists(@aptos_framework), error::invalid_state(E_NONCE_HISTORY_DOES_NOT_EXIST)); + // Check if the transaction expiration time is too far in the future. 
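+ // Entries are retained until expiration + NONCE_REPLAY_PROTECTION_OVERLAP_INTERVAL_SECS, so capping the
+ // expiration at now + NONCE_REPLAY_PROTECTION_OVERLAP_INTERVAL_SECS also bounds how long an entry can
+ // remain in the history (roughly twice the overlap interval after insertion).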
+ assert!(txn_expiration_time <= timestamp::now_seconds() + NONCE_REPLAY_PROTECTION_OVERLAP_INTERVAL_SECS, error::invalid_argument(ETRANSACTION_EXPIRATION_TOO_FAR_IN_FUTURE));
+ let nonce_history = &mut NonceHistory[@aptos_framework];
+ let nonce_key = NonceKey {
+ sender_address,
+ nonce,
+ };
+ let bucket_index = sip_hash_from_value(&nonce_key) % NUM_BUCKETS;
+ let current_time = timestamp::now_seconds();
+ if (!nonce_history.nonce_table.contains(bucket_index)) {
+ nonce_history.nonce_table.add(
+ bucket_index,
+ empty_bucket(false)
+ );
+ };
+ let bucket = table::borrow_mut(&mut nonce_history.nonce_table, bucket_index);
+
+ let existing_exp_time = bucket.nonce_to_exp_time_map.get(&nonce_key);
+ if (existing_exp_time.is_some()) {
+ let existing_exp_time = existing_exp_time.extract();
+
+ // If the existing (address, nonce) pair has not expired, return false.
+ if (existing_exp_time >= current_time) {
+ return false;
+ };
+
+ // We maintain an invariant that two transactions with the same (address, nonce) pair cannot be stored
+ // in the nonce history if their transaction expiration times are less than `NONCE_REPLAY_PROTECTION_OVERLAP_INTERVAL_SECS`
+ // seconds apart.
+ if (txn_expiration_time <= existing_exp_time + NONCE_REPLAY_PROTECTION_OVERLAP_INTERVAL_SECS) {
+ return false;
+ };
+
+ // If the existing (address, nonce) pair has expired, garbage collect it.
+ bucket.nonce_to_exp_time_map.remove(&nonce_key);
+ bucket.nonces_ordered_by_exp_time.remove(&NonceKeyWithExpTime {
+ txn_expiration_time: existing_exp_time,
+ sender_address,
+ nonce,
+ });
+ };
+
+ // Garbage collect up to MAX_ENTRIES_GARBAGE_COLLECTED_PER_CALL expired nonces in the bucket.
+ let i = 0;
+ while (i < MAX_ENTRIES_GARBAGE_COLLECTED_PER_CALL && !bucket.nonces_ordered_by_exp_time.is_empty()) {
+ let (front_k, _) = bucket.nonces_ordered_by_exp_time.borrow_front();
+ // We garbage collect a nonce only after it has expired and a further NONCE_REPLAY_PROTECTION_OVERLAP_INTERVAL_SECS
+ // seconds have passed.
+ if (front_k.txn_expiration_time + NONCE_REPLAY_PROTECTION_OVERLAP_INTERVAL_SECS < current_time) {
+ bucket.nonces_ordered_by_exp_time.pop_front();
+ bucket.nonce_to_exp_time_map.remove(&NonceKey {
+ sender_address: front_k.sender_address,
+ nonce: front_k.nonce,
+ });
+ } else {
+ break;
+ };
+ i = i + 1;
+ };
+
+ // Insert the (address, nonce) pair into the bucket.
+ let nonce_key_with_exp_time = NonceKeyWithExpTime {
+ txn_expiration_time,
+ sender_address,
+ nonce,
+ };
+ bucket.nonces_ordered_by_exp_time.add(nonce_key_with_exp_time, true);
+ bucket.nonce_to_exp_time_map.add(nonce_key, txn_expiration_time);
+ true
+ }
+
+ // Returns true if the input (address, nonce) pair doesn't exist in the nonce history.
+ // Returns false if the input (address, nonce) pair already exists in the nonce history.
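+ // Note that an entry is still treated as existing for NONCE_REPLAY_PROTECTION_OVERLAP_INTERVAL_SECS seconds
+ // after its expiration time, mirroring the garbage-collection window used by `check_and_insert_nonce`.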
+ #[test_only] + fun check_if_nonce_exists_in_history( + sender_address: address, + nonce: u64, + ): bool acquires NonceHistory { + assert!(exists(@aptos_framework), error::invalid_state(E_NONCE_HISTORY_DOES_NOT_EXIST)); + let nonce_key = NonceKey { + sender_address, + nonce, + }; + let bucket_index = sip_hash_from_value(&nonce_key) % NUM_BUCKETS; + let nonce_history = &NonceHistory[@aptos_framework]; + if (nonce_history.nonce_table.contains(bucket_index)) { + let bucket = table::borrow(&nonce_history.nonce_table, bucket_index); + let existing_exp_time = bucket.nonce_to_exp_time_map.get(&nonce_key); + if (existing_exp_time.is_some()) { + let existing_exp_time = existing_exp_time.extract(); + // We store the nonce in nonce history for `NONCE_REPLAY_PROTECTION_OVERLAP_INTERVAL_SECS` seconds after it expires. + if (timestamp::now_seconds() <= existing_exp_time + NONCE_REPLAY_PROTECTION_OVERLAP_INTERVAL_SECS) { + return false; + }; + }; + }; + true + } + + #[test(fx = @aptos_framework)] + public entry fun nonce_history_test(fx: signer) acquires NonceHistory { + initialize_nonce_table(&fx); + timestamp::set_time_has_started_for_testing(&fx); + let begin_time = timestamp::now_seconds(); + + assert!(check_and_insert_nonce(@0x5, 1234, begin_time + 50)); + assert!(!check_and_insert_nonce(@0x5, 1234, begin_time + 51)); + assert!(!check_if_nonce_exists_in_history(@0x5, 1234)); + assert!(check_if_nonce_exists_in_history(@0x5, 1235)); + + timestamp::fast_forward_seconds(30); + assert!(!check_and_insert_nonce(@0x5, 1234, begin_time + 85)); + assert!(check_and_insert_nonce(@0x5, 1235, begin_time + 85)); + + timestamp::fast_forward_seconds(85); + // Nonce (0x5, 1234) expires at `begin_time + 50`. + // Nonce (0x5, 1234) will be garbage collected after + // `begin_time + 50 + NONCE_REPLAY_PROTECTION_OVERLAP_INTERVAL_SECS` seconds. + assert!(!check_if_nonce_exists_in_history(@0x5, 1234)); + timestamp::fast_forward_seconds(1); + assert!(check_if_nonce_exists_in_history(@0x5, 1234)); + assert!(check_and_insert_nonce(@0x5, 1234, begin_time + 150)); + + // Nonce (0x5, 1235) expired at `begin_time + 85` seconds. + // We are currently at `begin_time + 116` seconds. + // The nonce is still stored in nonce history. + // But another nonce with expiry time higher than + // `begin_time + 85 + NONCE_REPLAY_PROTECTION_OVERLAP_INTERVAL_SECS` can still be inserted. + assert!(!check_if_nonce_exists_in_history(@0x5, 1235)); + assert!(!check_and_insert_nonce(@0x5, 1235, begin_time + 150)); + assert!(check_and_insert_nonce(@0x5, 1235, begin_time + 151)); + } +} diff --git a/aptos-move/framework/aptos-framework/sources/object.move b/aptos-move/framework/aptos-framework/sources/object.move index 2c6d6eca3090a..d05139292fdfa 100644 --- a/aptos-move/framework/aptos-framework/sources/object.move +++ b/aptos-move/framework/aptos-framework/sources/object.move @@ -28,6 +28,7 @@ module aptos_framework::object { use aptos_framework::create_signer::create_signer; use aptos_framework::event; use aptos_framework::guid; + use aptos_framework::permissioned_signer; friend aptos_framework::coin; friend aptos_framework::primary_fungible_store; @@ -50,6 +51,8 @@ module aptos_framework::object { const EOBJECT_NOT_BURNT: u64 = 8; /// Object is untransferable any operations that might result in a transfer are disallowed. const EOBJECT_NOT_TRANSFERRABLE: u64 = 9; + /// Cannot burn an object that is already burnt. + const EOBJECT_ALREADY_BURNT: u64 = 10; /// Explicitly separate the GUID space between Object and Account to prevent accidental overlap. 
const INIT_GUID_CREATION_NUM: u64 = 0x4000000000000; @@ -163,6 +166,11 @@ module aptos_framework::object { self: address, } + /// Permission to transfer object with permissioned signer. + struct TransferPermission has copy, drop, store { + object: address, + } + /// Emitted whenever the object's owner field is changed. struct TransferEvent has drop, store { object: address, @@ -422,7 +430,7 @@ module aptos_framework::object { } = object_core; if (exists(ref.self)) { - let Untransferable {} = move_from(ref.self); + let Untransferable {} = move_from(ref.self); }; event::destroy_handle(transfer_events); @@ -496,15 +504,16 @@ module aptos_framework::object { to, }, ); + } else { + event::emit_event( + &mut object.transfer_events, + TransferEvent { + object: ref.self, + from: object.owner, + to, + }, + ); }; - event::emit_event( - &mut object.transfer_events, - TransferEvent { - object: ref.self, - from: object.owner, - to, - }, - ); object.owner = to; } @@ -537,6 +546,10 @@ module aptos_framework::object { to: address, ) acquires ObjectCore { let owner_address = signer::address_of(owner); + assert!( + permissioned_signer::check_permission_exists(owner, TransferPermission { object }), + error::permission_denied(EOBJECT_NOT_TRANSFERRABLE) + ); verify_ungated_and_descendant(owner_address, object); transfer_raw_inner(object, to); } @@ -552,15 +565,16 @@ module aptos_framework::object { to, }, ); + } else { + event::emit_event( + &mut object_core.transfer_events, + TransferEvent { + object, + from: object_core.owner, + to, + }, + ); }; - event::emit_event( - &mut object_core.transfer_events, - TransferEvent { - object, - from: object_core.owner, - to, - }, - ); object_core.owner = to; }; } @@ -610,15 +624,16 @@ module aptos_framework::object { }; } - /// Forcefully transfer an unwanted object to BURN_ADDRESS, ignoring whether ungated_transfer is allowed. + /// Add a TombStone to the object. The object will then be interpreted as hidden via indexers. /// This only works for objects directly owned and for simplicity does not apply to indirectly owned objects. /// Original owners can reclaim burnt objects any time in the future by calling unburn. + /// Please use the test only [`object::burn_object_with_transfer`] for testing with previously burned objects. public entry fun burn(owner: &signer, object: Object) acquires ObjectCore { let original_owner = signer::address_of(owner); assert!(is_owner(object, original_owner), error::permission_denied(ENOT_OBJECT_OWNER)); let object_addr = object.inner; + assert!(!exists(object_addr), EOBJECT_ALREADY_BURNT); move_to(&create_signer(object_addr), TombStone { original_owner }); - transfer_raw_inner(object_addr, BURN_ADDRESS); } /// Allow origin owners to reclaim any objects they previous burnt. 
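/// With the tombstone-only burn above, the owner typically still holds the object and unburn simply removes the TombStone;
/// objects burnt through the legacy flow (owned by BURN_ADDRESS) are transferred back to the recorded original owner.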
@@ -628,10 +643,26 @@ module aptos_framework::object { ) acquires TombStone, ObjectCore { let object_addr = object.inner; assert!(exists(object_addr), error::invalid_argument(EOBJECT_NOT_BURNT)); + assert!( + permissioned_signer::check_permission_exists(original_owner, TransferPermission { object: object_addr }), + error::permission_denied(EOBJECT_NOT_TRANSFERRABLE) + ); - let TombStone { original_owner: original_owner_addr } = move_from(object_addr); - assert!(original_owner_addr == signer::address_of(original_owner), error::permission_denied(ENOT_OBJECT_OWNER)); - transfer_raw_inner(object_addr, original_owner_addr); + // The new owner of the object can always unburn it, but if it's the burn address, we go to the old functionality + let object_core = borrow_global(object_addr); + if (object_core.owner == signer::address_of(original_owner)) { + let TombStone { original_owner: _ } = move_from(object_addr); + } else if (object_core.owner == BURN_ADDRESS) { + // The old functionality + let TombStone { original_owner: original_owner_addr } = move_from(object_addr); + assert!( + original_owner_addr == signer::address_of(original_owner), + error::permission_denied(ENOT_OBJECT_OWNER) + ); + transfer_raw_inner(object_addr, original_owner_addr); + } else { + abort error::permission_denied(ENOT_OBJECT_OWNER); + }; } /// Accessors @@ -654,7 +685,7 @@ module aptos_framework::object { borrow_global(object.inner).owner } - #[view] + #[view] /// Return true if the provided address is the current owner. public fun is_owner(object: Object, owner: address): bool acquires ObjectCore { owner(object) == owner @@ -664,15 +695,16 @@ module aptos_framework::object { /// Return true if the provided address has indirect or direct ownership of the provided object. public fun owns(object: Object, owner: address): bool acquires ObjectCore { let current_address = object_address(&object); - if (current_address == owner) { - return true - }; assert!( exists(current_address), error::not_found(EOBJECT_DOES_NOT_EXIST), ); + if (current_address == owner) { + return true + }; + let object = borrow_global(current_address); let current_address = object.owner; @@ -701,6 +733,30 @@ module aptos_framework::object { obj_owner } + /// Master signer offers a transfer permission of an object to a permissioned signer. + public fun grant_permission( + master: &signer, + permissioned_signer: &signer, + object: Object, + ) { + permissioned_signer::authorize_unlimited( + master, + permissioned_signer, + TransferPermission { object: object.inner } + ) + } + + /// Grant a transfer permission to the permissioned signer using TransferRef. + public fun grant_permission_with_transfer_ref( + permissioned_signer: &signer, + ref: &TransferRef, + ) { + permissioned_signer::grant_unlimited_with_permissioned_signer( + permissioned_signer, + TransferPermission { object: ref.self } + ) + } + #[test_only] use std::option::{Self, Option}; @@ -709,6 +765,20 @@ module aptos_framework::object { #[test_only] const EWEAPON_DOES_NOT_EXIST: u64 = 0x101; + #[test_only] + /// For testing the previous behavior of `object::burn()` + /// + /// Forcefully transfer an unwanted object to BURN_ADDRESS, ignoring whether ungated_transfer is allowed. + /// This only works for objects directly owned and for simplicity does not apply to indirectly owned objects. + /// Original owners can reclaim burnt objects any time in the future by calling unburn. 
+ public fun burn_object_with_transfer(owner: &signer, object: Object) acquires ObjectCore { + let original_owner = signer::address_of(owner); + assert!(is_owner(object, original_owner), error::permission_denied(ENOT_OBJECT_OWNER)); + let object_addr = object.inner; + move_to(&create_signer(object_addr), TombStone { original_owner }); + transfer_raw_inner(object_addr, BURN_ADDRESS); + } + #[test_only] struct HeroEquipEvent has drop, store { weapon_id: Option>, @@ -820,6 +890,16 @@ module aptos_framework::object { transfer_with_ref(linear_transfer_ref_bad, @0x789); } + #[test(creator = @0x123)] + #[expected_failure(abort_code = 0x10008, location = Self)] + fun test_cannot_unburn_legacy_after_transfer_with_ref(creator: &signer) acquires ObjectCore, TombStone { + let (hero_constructor, hero) = create_hero(creator); + burn_object_with_transfer(creator, hero); + let transfer_ref = generate_transfer_ref(&hero_constructor); + transfer_with_ref(generate_linear_transfer_ref(&transfer_ref), @0x456); + unburn(creator, hero); + } + #[test(creator = @0x123)] #[expected_failure(abort_code = 0x10008, location = Self)] fun test_cannot_unburn_after_transfer_with_ref(creator: &signer) acquires ObjectCore, TombStone { @@ -881,6 +961,28 @@ module aptos_framework::object { // Owner should be able to burn, despite ungated transfer disallowed. burn(creator, hero); + assert!(owner(hero) == signer::address_of(creator), 0); + assert!(!ungated_transfer_allowed(hero), 0); + assert!(exists(object_address(&hero)), 0); + + // Owner should be able to reclaim. + unburn(creator, hero); + assert!(owner(hero) == signer::address_of(creator), 0); + // Object still frozen. + assert!(!ungated_transfer_allowed(hero), 0); + // Tombstone gone + assert!(!exists(object_address(&hero)), 0); + } + + #[test(creator = @0x123)] + fun test_burn_and_unburn_old(creator: &signer) acquires ObjectCore, TombStone { + let (hero_constructor, hero) = create_hero(creator); + // Freeze the object. + let transfer_ref = generate_transfer_ref(&hero_constructor); + disable_ungated_transfer(&transfer_ref); + + // Owner should be able to burn, despite ungated transfer disallowed. 
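+ // Unlike the tombstone-only burn above, the legacy helper also moves ownership to BURN_ADDRESS.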
+ burn_object_with_transfer(creator, hero); assert!(owner(hero) == BURN_ADDRESS, 0); assert!(!ungated_transfer_allowed(hero), 0); @@ -1065,7 +1167,9 @@ module aptos_framework::object { #[test(creator = @0x123)] #[expected_failure(abort_code = 327689, location = Self)] - fun test_untransferable_indirect_ownership_with_linear_transfer_ref(creator: &signer) acquires ObjectCore, TombStone { + fun test_untransferable_indirect_ownership_with_linear_transfer_ref( + creator: &signer + ) acquires ObjectCore, TombStone { let (_, hero) = create_hero(creator); let (weapon_constructor_ref, weapon) = create_weapon(creator); transfer_to_object(creator, weapon, hero); @@ -1074,4 +1178,69 @@ module aptos_framework::object { set_untransferable(&weapon_constructor_ref); transfer_with_ref(linear_transfer_ref, @0x456); } + + #[test_only] + use aptos_framework::timestamp; + + #[test(creator = @0x123)] + fun test_transfer_permission_e2e( + creator: &signer, + ) acquires ObjectCore { + let aptos_framework = account::create_signer_for_test(@0x1); + timestamp::set_time_has_started_for_testing(&aptos_framework); + + let (_, hero) = create_hero(creator); + let (_, weapon) = create_weapon(creator); + + // Create a permissioned signer + let creator_permission_handle = permissioned_signer::create_permissioned_handle(creator); + let creator_permission_signer = permissioned_signer::signer_from_permissioned_handle(&creator_permission_handle); + + // Grant aaron_permission_signer permission to transfer weapon object + grant_permission(creator, &creator_permission_signer, weapon); + transfer_to_object(&creator_permission_signer, weapon, hero); + + permissioned_signer::destroy_permissioned_handle(creator_permission_handle); + } + + #[test(creator = @0x123)] + #[expected_failure(abort_code = 327689, location = Self)] + fun test_transfer_no_permission( + creator: &signer, + ) acquires ObjectCore { + let aptos_framework = account::create_signer_for_test(@0x1); + timestamp::set_time_has_started_for_testing(&aptos_framework); + + let (_, hero) = create_hero(creator); + let (_, weapon) = create_weapon(creator); + + // Create a permissioned signer + let creator_permission_handle = permissioned_signer::create_permissioned_handle(creator); + let creator_permission_signer = permissioned_signer::signer_from_permissioned_handle(&creator_permission_handle); + + transfer_to_object(&creator_permission_signer, weapon, hero); + + permissioned_signer::destroy_permissioned_handle(creator_permission_handle); + } + + #[test(creator = @0x123)] + fun test_create_and_transfer( + creator: &signer, + ) acquires ObjectCore { + let aptos_framework = account::create_signer_for_test(@0x1); + timestamp::set_time_has_started_for_testing(&aptos_framework); + + let (_, hero) = create_hero(creator); + let (weapon_ref, weapon) = create_weapon(creator); + let t_ref = generate_transfer_ref(&weapon_ref); + + // Create a permissioned signer + let creator_permission_handle = permissioned_signer::create_permissioned_handle(creator); + let creator_permission_signer = permissioned_signer::signer_from_permissioned_handle(&creator_permission_handle); + + grant_permission_with_transfer_ref(&creator_permission_signer, &t_ref); + transfer_to_object(&creator_permission_signer, weapon, hero); + + permissioned_signer::destroy_permissioned_handle(creator_permission_handle); + } } diff --git a/aptos-move/framework/aptos-framework/sources/object.spec.move b/aptos-move/framework/aptos-framework/sources/object.spec.move index d2627d649fd61..d7c193cf636b9 100644 --- 
a/aptos-move/framework/aptos-framework/sources/object.spec.move +++ b/aptos-move/framework/aptos-framework/sources/object.spec.move @@ -46,7 +46,15 @@ spec aptos_framework::object { /// /// spec module { - pragma aborts_if_is_strict; + pragma verify = false; + pragma aborts_if_is_partial; + } + + spec grant_permission { + pragma aborts_if_is_partial; + aborts_if !permissioned_signer::spec_is_permissioned_signer(permissioned_signer); + aborts_if permissioned_signer::spec_is_permissioned_signer(master); + aborts_if signer::address_of(master) != signer::address_of(permissioned_signer); } spec fun spec_exists_at(object: address): bool; @@ -312,9 +320,9 @@ spec aptos_framework::object { } spec create_object_internal( - creator_address: address, - object: address, - can_delete: bool, + creator_address: address, + object: address, + can_delete: bool, ): ConstructorRef { // property 1: Creating an object twice on the same address must never occur. /// [high-level-req-1] @@ -356,7 +364,7 @@ spec aptos_framework::object { } spec new_event_handle( - object: &signer, + object: &signer, ): event::EventHandle { aborts_if !exists(signer::address_of(object)); //Guid properties @@ -426,9 +434,9 @@ spec aptos_framework::object { } spec transfer_call( - owner: &signer, - object: address, - to: address, + owner: &signer, + object: address, + to: address, ) { pragma aborts_if_is_partial; // TODO: Verify the link list loop in verify_ungated_and_descendant @@ -438,9 +446,9 @@ spec aptos_framework::object { } spec transfer( - owner: &signer, - object: Object, - to: address, + owner: &signer, + object: Object, + to: address, ) { pragma aborts_if_is_partial; // TODO: Verify the link list loop in verify_ungated_and_descendant @@ -451,9 +459,9 @@ spec aptos_framework::object { } spec transfer_raw( - owner: &signer, - object: address, - to: address, + owner: &signer, + object: address, + to: address, ) { pragma aborts_if_is_partial; // TODO: Verify the link list loop in verify_ungated_and_descendant @@ -463,9 +471,9 @@ spec aptos_framework::object { } spec transfer_to_object ( - owner: &signer, - object: Object, - to: Object, + owner: &signer, + object: Object, + to: Object, ) { pragma aborts_if_is_partial; // TODO: Verify the link list loop in verify_ungated_and_descendant @@ -476,6 +484,15 @@ spec aptos_framework::object { } spec burn(owner: &signer, object: Object) { + pragma aborts_if_is_partial; + let object_address = object.inner; + aborts_if !exists(object_address); + aborts_if owner(object) != signer::address_of(owner); + ensures exists(object_address); + ensures is_owner(object, signer::address_of(owner)); + } + + spec burn_object_with_transfer(owner: &signer, object: Object) { pragma aborts_if_is_partial; let object_address = object.inner; aborts_if !exists(object_address); @@ -489,7 +506,9 @@ spec aptos_framework::object { aborts_if !exists(object_address); aborts_if !is_burnt(object); let tomb_stone = borrow_global(object_address); - aborts_if tomb_stone.original_owner != signer::address_of(original_owner); + let original_owner_address = signer::address_of(original_owner); + let object_current_owner = borrow_global(object_address).owner; + aborts_if object_current_owner != original_owner_address && tomb_stone.original_owner != original_owner_address; } spec verify_ungated_and_descendant(owner: address, destination: address) { diff --git a/aptos-move/framework/aptos-framework/sources/object_code_deployment.move b/aptos-move/framework/aptos-framework/sources/object_code_deployment.move index 
ef9e7d37fe9df..f611d1003573d 100644 --- a/aptos-move/framework/aptos-framework/sources/object_code_deployment.move +++ b/aptos-move/framework/aptos-framework/sources/object_code_deployment.move @@ -47,6 +47,8 @@ module aptos_framework::object_code_deployment { const ENOT_CODE_OBJECT_OWNER: u64 = 2; /// `code_object` does not exist. const ECODE_OBJECT_DOES_NOT_EXIST: u64 = 3; + /// Current permissioned signer cannot deploy object code. + const ENO_CODE_PERMISSION: u64 = 4; const OBJECT_CODE_DEPLOYMENT_DOMAIN_SEPARATOR: vector = b"aptos_framework::object_code_deployment"; @@ -84,6 +86,7 @@ module aptos_framework::object_code_deployment { metadata_serialized: vector, code: vector>, ) { + code::check_code_publishing_permission(publisher); assert!( features::is_object_code_deployment_enabled(), error::unavailable(EOBJECT_CODE_DEPLOYMENT_NOT_SUPPORTED), @@ -120,6 +123,7 @@ module aptos_framework::object_code_deployment { code: vector>, code_object: Object, ) acquires ManagingRefs { + code::check_code_publishing_permission(publisher); let publisher_address = signer::address_of(publisher); assert!( object::is_owner(code_object, publisher_address), diff --git a/aptos-move/framework/aptos-framework/sources/permissioned_signer.move b/aptos-move/framework/aptos-framework/sources/permissioned_signer.move new file mode 100644 index 0000000000000..ffee9c8004f1d --- /dev/null +++ b/aptos-move/framework/aptos-framework/sources/permissioned_signer.move @@ -0,0 +1,708 @@ +/// A _permissioned signer_ consists of a pair of the original signer and a generated +/// address which is used to store information about associated permissions. +/// +/// A permissioned signer is a restricted version of a signer. Functions `move_to` and +/// `address_of` behave the same, and can be passed wherever signer is needed. However, +/// code can internally query for the permissions to assert additional restrictions on +/// the use of the signer. +/// +/// A client which is interested in restricting access granted via a signer can create a permissioned signer +/// and pass on to other existing code without changes to existing APIs. Core functions in the framework, for +/// example account functions, can then assert availability of permissions, effectively restricting +/// existing code in a compatible way. +/// +/// After introducing the core functionality, examples are provided for withdraw limit on accounts, and +/// for blind signing. +module aptos_framework::permissioned_signer { + use std::features; + use std::signer; + use std::error; + use std::vector; + use std::option::{Option, Self}; + use aptos_std::copyable_any::{Self, Any}; + use aptos_framework::big_ordered_map::{Self, BigOrderedMap}; + use aptos_framework::create_signer::create_signer; + use aptos_framework::transaction_context::generate_auid_address; + use aptos_framework::timestamp; + + /// Trying to grant permission using non-master signer. + const ENOT_MASTER_SIGNER: u64 = 1; + + /// Cannot authorize a permission. + const ECANNOT_AUTHORIZE: u64 = 2; + + /// Access permission information from a master signer. + const ENOT_PERMISSIONED_SIGNER: u64 = 3; + + /// signer doesn't have enough capacity to extract permission. + const ECANNOT_EXTRACT_PERMISSION: u64 = 4; + + /// permission handle has expired. + const E_PERMISSION_EXPIRED: u64 = 5; + + /// storing extracted permission into a different signer. + const E_PERMISSION_MISMATCH: u64 = 6; + + /// permission handle has been revoked by the original signer. 
+ const E_PERMISSION_REVOKED: u64 = 7; + + /// destroying permission handle that has already been revoked or not owned by the + /// given master signer. + const E_NOT_ACTIVE: u64 = 8; + + /// Permissioned signer feature is not activated. + const EPERMISSION_SIGNER_DISABLED: u64 = 9; + + const U256_MAX: u256 = + 115792089237316195423570985008687907853269984665640564039457584007913129639935; + + /// If a permissioned signer has this permission, it would be able to revoke other granted + /// permission handles in the same signer. + struct RevokePermissionHandlePermission has copy, store, drop {} + + /// Stores the list of granted permission handles for a given account. + struct GrantedPermissionHandles has key { + /// Each address refers to a `permissions_storage_addr` that stores the `PermissionStorage`. + active_handles: vector
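+ // Handles recorded here can be revoked individually via `revoke_permission_storage_address`
+ // or all at once via `revoke_all_handles`.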
+ }
+
+ /// An ephemeral permission handle that can be used to generate a permissioned signer with permission
+ /// configuration stored within.
+ enum PermissionedHandle {
+ V1 {
+ /// Address of the signer that creates this handle.
+ master_account_addr: address,
+ /// Address that stores `PermissionStorage`.
+ permissions_storage_addr: address
+ }
+ }
+
+ /// A permission handle that can be used to generate a permissioned signer.
+ ///
+ /// This handle is storable and thus should be treated very carefully as it provides functionality
+ /// similar to signer delegation.
+ enum StorablePermissionedHandle has store {
+ V1 {
+ /// Address of the signer that creates this handle.
+ master_account_addr: address,
+ /// Address that stores `PermissionStorage`.
+ permissions_storage_addr: address,
+ /// A permissioned signer can no longer be generated from this handle after `expiration_time`.
+ expiration_time: u64
+ }
+ }
+
+ /// The actual permission configuration stored on-chain.
+ ///
+ /// The address that holds `PermissionStorage` will be generated freshly every time a permission
+ /// handle gets created.
+ enum PermissionStorage has key {
+ V1 {
+ /// A heterogeneous map from `Permission` structs defined by different modules to
+ /// their permission capacities.
+ perms: BigOrderedMap
+ }
+ }
+
+ /// Types of permission capacity stored on chain.
+ enum StoredPermission has store, copy, drop {
+ /// Unlimited capacity.
+ Unlimited,
+ /// Fixed capacity, will be deducted when permission is used.
+ Capacity(u256),
+ }
+
+ /// Create an ephemeral permission handle based on the master signer.
+ ///
+ /// This handle can be used to derive a signer that can be used in the context of
+ /// the current transaction.
+ public fun create_permissioned_handle(master: &signer): PermissionedHandle {
+ assert!(
+ features::is_permissioned_signer_enabled(),
+ error::permission_denied(EPERMISSION_SIGNER_DISABLED)
+ );
+
+ assert_master_signer(master);
+ let permissions_storage_addr = generate_auid_address();
+ let master_account_addr = signer::address_of(master);
+
+ initialize_permission_address(permissions_storage_addr);
+
+ PermissionedHandle::V1 { master_account_addr, permissions_storage_addr }
+ }
+
+ /// Destroys an ephemeral permission handle. Cleans up the permissions stored in that handle.
+ public fun destroy_permissioned_handle(p: PermissionedHandle) acquires PermissionStorage {
+ assert!(
+ features::is_permissioned_signer_enabled(),
+ error::permission_denied(EPERMISSION_SIGNER_DISABLED)
+ );
+ let PermissionedHandle::V1 { master_account_addr: _, permissions_storage_addr } =
+ p;
+ destroy_permissions_storage_address(permissions_storage_addr);
+ }
+
+ /// Generate the permissioned signer based on the ephemeral permission handle.
+ ///
+ /// This signer can be used as a regular signer for other smart contracts. However, when such
+ /// a signer interacts with various framework functions, it is subject to permission checks
+ /// and will abort if a check fails.
+ public fun signer_from_permissioned_handle(p: &PermissionedHandle): signer {
+ assert!(
+ features::is_permissioned_signer_enabled(),
+ error::permission_denied(EPERMISSION_SIGNER_DISABLED)
+ );
+ signer_from_permissioned_handle_impl(
+ p.master_account_addr, p.permissions_storage_addr
+ )
+ }
+
+ /// Returns true if `s` is a permissioned signer.
+ public fun is_permissioned_signer(s: &signer): bool {
+ // When the permissioned signer is disabled, no one is able to construct a permissioned
+ // signer. Thus we should return false here, as other on-chain permission checks will
+ // depend on this check.
+ if(!features::is_permissioned_signer_enabled()) {
+ return false;
+ };
+ is_permissioned_signer_impl(s)
+ }
+
+ /// Grant the permissioned signer the permission to revoke granted permission handles under
+ /// its address.
+ public fun grant_revoke_permission(
+ master: &signer,
+ permissioned: &signer,
+ ) acquires PermissionStorage {
+ assert!(
+ features::is_permissioned_signer_enabled(),
+ error::permission_denied(EPERMISSION_SIGNER_DISABLED)
+ );
+ authorize_unlimited(master, permissioned, RevokePermissionHandlePermission {});
+ }
+
+ /// Revoke a specific storable permission handle immediately. This prevents the owner of
+ /// the storable permission handle from deriving a signer from it.
+ public entry fun revoke_permission_storage_address(
+ s: &signer, permissions_storage_addr: address
+ ) acquires GrantedPermissionHandles, PermissionStorage {
+ assert!(
+ features::is_permissioned_signer_enabled(),
+ error::permission_denied(EPERMISSION_SIGNER_DISABLED)
+ );
+ assert!(
+ check_permission_exists(s, RevokePermissionHandlePermission {}),
+ error::permission_denied(ENOT_MASTER_SIGNER)
+ );
+ let master_account_addr = signer::address_of(s);
+
+ assert!(
+ exists(master_account_addr),
+ error::permission_denied(E_PERMISSION_REVOKED),
+ );
+ let active_handles = &mut GrantedPermissionHandles[master_account_addr].active_handles;
+ let (found, idx) = active_handles.index_of(&permissions_storage_addr);
+
+ // The address has to be in the activated list in the master account address.
+ assert!(found, error::permission_denied(E_NOT_ACTIVE));
+ active_handles.swap_remove(idx);
+ destroy_permissions_storage_address(permissions_storage_addr);
+ }
+
+ /// Revoke all storable permission handles of the signer immediately.
+ public entry fun revoke_all_handles(s: &signer) acquires GrantedPermissionHandles, PermissionStorage {
+ assert!(
+ features::is_permissioned_signer_enabled(),
+ error::permission_denied(EPERMISSION_SIGNER_DISABLED)
+ );
+ assert!(
+ check_permission_exists(s, RevokePermissionHandlePermission {}),
+ error::permission_denied(ENOT_MASTER_SIGNER)
+ );
+ let master_account_addr = signer::address_of(s);
+ if (!exists(master_account_addr)) { return };
+
+ let granted_permissions =
+ borrow_global_mut(master_account_addr);
+ let delete_list = vector::trim_reverse(
+ &mut granted_permissions.active_handles, 0
+ );
+ vector::destroy(
+ delete_list,
+ |address| {
+ destroy_permissions_storage_address(address);
+ }
+ )
+ }
+
+ /// Initialize permission storage by putting an empty storage under the address.
+ inline fun initialize_permission_address(permissions_storage_addr: address) {
+ move_to(
+ &create_signer(permissions_storage_addr),
+ // Each key is ~100 bytes, the value is 12 bytes.
+ PermissionStorage::V1 { perms: big_ordered_map::new_with_config(40, 35, false) }
+ );
+ }
+
+ /// Create a storable permission handle based on the master signer.
+ ///
+ /// This handle can be used to derive a signer that can be stored by a smart contract.
+ /// This is as dangerous as key delegation, thus it remains public(package) for now.
+ ///
+ /// The caller should check if `expiration_time` is not too far in the future.
+ public(package) fun create_storable_permissioned_handle( + master: &signer, expiration_time: u64 + ): StorablePermissionedHandle acquires GrantedPermissionHandles { + assert!( + features::is_permissioned_signer_enabled(), + error::permission_denied(EPERMISSION_SIGNER_DISABLED) + ); + + assert_master_signer(master); + let permissions_storage_addr = generate_auid_address(); + let master_account_addr = signer::address_of(master); + + assert!( + timestamp::now_seconds() < expiration_time, + error::permission_denied(E_PERMISSION_EXPIRED) + ); + + if (!exists(master_account_addr)) { + move_to( + master, GrantedPermissionHandles { active_handles: vector::empty() } + ); + }; + + GrantedPermissionHandles[master_account_addr] + .active_handles.push_back(permissions_storage_addr); + + initialize_permission_address(permissions_storage_addr); + + StorablePermissionedHandle::V1 { + master_account_addr, + permissions_storage_addr, + expiration_time + } + } + + /// Destroys a storable permission handle. Clean up the permission stored in that handle + public(package) fun destroy_storable_permissioned_handle( + p: StorablePermissionedHandle + ) acquires PermissionStorage, GrantedPermissionHandles { + let StorablePermissionedHandle::V1 { + master_account_addr, + permissions_storage_addr, + expiration_time: _ + } = p; + + assert!( + exists(master_account_addr), + error::permission_denied(E_PERMISSION_REVOKED), + ); + let active_handles = &mut GrantedPermissionHandles[master_account_addr].active_handles; + + let (found, idx) = active_handles.index_of(&permissions_storage_addr); + + // Removing the address from the active handle list if it's still active. + if(found) { + active_handles.swap_remove(idx); + }; + + destroy_permissions_storage_address(permissions_storage_addr); + } + + inline fun destroy_permissions_storage_address( + permissions_storage_addr: address + ) acquires PermissionStorage { + if (exists(permissions_storage_addr)) { + let PermissionStorage::V1 { perms } = + move_from(permissions_storage_addr); + big_ordered_map::destroy( + perms, + |_dv| {}, + ); + } + } + + /// Generate the permissioned signer based on the storable permission handle. + public(package) fun signer_from_storable_permissioned_handle( + p: &StorablePermissionedHandle + ): signer { + assert!( + features::is_permissioned_signer_enabled(), + error::permission_denied(EPERMISSION_SIGNER_DISABLED) + ); + assert!( + timestamp::now_seconds() < p.expiration_time, + error::permission_denied(E_PERMISSION_EXPIRED) + ); + assert!( + exists(p.permissions_storage_addr), + error::permission_denied(E_PERMISSION_REVOKED) + ); + signer_from_permissioned_handle_impl( + p.master_account_addr, p.permissions_storage_addr + ) + } + + /// Return the permission handle address so that it could be used for revocation purpose. + public(package) fun permissions_storage_address( + p: &StorablePermissionedHandle + ): address { + p.permissions_storage_addr + } + + /// Helper function that would abort if the signer passed in is a permissioned signer. + public(package) fun assert_master_signer(s: &signer) { + assert!( + !is_permissioned_signer(s), error::permission_denied(ENOT_MASTER_SIGNER) + ); + } + + /// ===================================================================================================== + /// StoredPermission operations + /// + /// check if StoredPermission has at least `threshold` capacity. 
+ fun is_above(perm: &StoredPermission, threshold: u256): bool { + match (perm) { + StoredPermission::Capacity(capacity) => *capacity >= threshold, + StoredPermission::Unlimited => true, + } + } + + /// consume `threshold` capacity from StoredPermission + fun consume_capacity(perm: &mut StoredPermission, threshold: u256): bool { + match (perm) { + StoredPermission::Capacity(current_capacity) => { + if (*current_capacity >= threshold) { + *current_capacity = *current_capacity - threshold; + true + } else { false } + } + StoredPermission::Unlimited => true + } + } + + /// increase `threshold` capacity from StoredPermission + fun increase_capacity(perm: &mut StoredPermission, threshold: u256) { + match (perm) { + StoredPermission::Capacity(current_capacity) => { + *current_capacity = *current_capacity + threshold; + } + StoredPermission::Unlimited => (), + } + } + + /// merge the two stored permission + fun merge(lhs: &mut StoredPermission, rhs: StoredPermission) { + match (rhs) { + StoredPermission::Capacity(new_capacity) => { + match (lhs) { + StoredPermission::Capacity(current_capacity) => { + *current_capacity = *current_capacity + new_capacity; + } + StoredPermission::Unlimited => (), + } + } + StoredPermission::Unlimited => *lhs = StoredPermission::Unlimited, + } + } + + /// ===================================================================================================== + /// Permission Management + /// + /// Authorizes `permissioned` with the given permission. This requires to have access to the `master` + /// signer. + + inline fun map_or( + permissioned: &signer, + perm: PermKey, + mutate: |&mut StoredPermission| T, + default: T, + ): T { + let permission_signer_addr = permission_address(permissioned); + assert!( + exists(permission_signer_addr), + error::permission_denied(E_NOT_ACTIVE) + ); + let perms = + &mut borrow_global_mut(permission_signer_addr).perms; + let key = copyable_any::pack(perm); + if (big_ordered_map::contains(perms, &key)) { + let value = perms.remove(&key); + let return_ = mutate(&mut value); + perms.add(key, value); + return_ + } else { + default + } + } + + inline fun insert_or( + permissioned: &signer, + perm: PermKey, + mutate: |&mut StoredPermission|, + default: StoredPermission, + ) { + let permission_signer_addr = permission_address(permissioned); + assert!( + exists(permission_signer_addr), + error::permission_denied(E_NOT_ACTIVE) + ); + let perms = + &mut borrow_global_mut(permission_signer_addr).perms; + let key = copyable_any::pack(perm); + if (perms.contains(&key)) { + let value = perms.remove(&key); + mutate(&mut value); + perms.add(key, value); + } else { + perms.add(key, default); + } + } + + /// Authorizes `permissioned` with a given capacity and increment the existing capacity if present. + /// + /// Consumption using `check_permission_consume` will deduct the capacity. + public(package) fun authorize_increase( + master: &signer, + permissioned: &signer, + capacity: u256, + perm: PermKey + ) acquires PermissionStorage { + assert!( + is_permissioned_signer(permissioned) + && !is_permissioned_signer(master) + && signer::address_of(master) == signer::address_of(permissioned), + error::permission_denied(ECANNOT_AUTHORIZE) + ); + insert_or( + permissioned, + perm, + |stored_permission| { + increase_capacity(stored_permission, capacity); + }, + StoredPermission::Capacity(capacity), + ) + } + + /// Authorizes `permissioned` with the given unlimited permission. + /// Unlimited permission can be consumed however many times. 
+ public(package) fun authorize_unlimited( + master: &signer, + permissioned: &signer, + perm: PermKey + ) acquires PermissionStorage { + assert!( + is_permissioned_signer(permissioned) + && !is_permissioned_signer(master) + && signer::address_of(master) == signer::address_of(permissioned), + error::permission_denied(ECANNOT_AUTHORIZE) + ); + insert_or( + permissioned, + perm, + |stored_permission| { + *stored_permission = StoredPermission::Unlimited; + }, + StoredPermission::Unlimited, + ) + } + + /// Grant an unlimited permission to a permissioned signer **without** the master signer's approval. + public(package) fun grant_unlimited_with_permissioned_signer( + permissioned: &signer, + perm: PermKey + ) acquires PermissionStorage { + if(!is_permissioned_signer(permissioned)) { + return; + }; + insert_or( + permissioned, + perm, + |stored_permission| { + *stored_permission = StoredPermission::Unlimited; + }, + StoredPermission::Unlimited, + ) + } + + /// Increase the `capacity` of a permissioned signer **without** the master signer's approval. + /// + /// The caller of the module will need to make sure the witness type `PermKey` can only be + /// constructed within its own module; otherwise an attacker could refill the permission for + /// themselves and bypass the checks. + public(package) fun increase_limit( + permissioned: &signer, + capacity: u256, + perm: PermKey + ) acquires PermissionStorage { + if(!is_permissioned_signer(permissioned)) { + return; + }; + insert_or( + permissioned, + perm, + |stored_permission| { + increase_capacity(stored_permission, capacity); + }, + StoredPermission::Capacity(capacity), + ) + } + + public(package) fun check_permission_exists( + s: &signer, perm: PermKey + ): bool acquires PermissionStorage { + // 0 capacity permissions will be treated as non-existent. + check_permission_capacity_above(s, 1, perm) + } + + public(package) fun check_permission_capacity_above( + s: &signer, threshold: u256, perm: PermKey + ): bool acquires PermissionStorage { + if (!is_permissioned_signer(s)) { + // master signer has all permissions + return true + }; + map_or( + s, + perm, + |stored_permission| { + is_above(stored_permission, threshold) + }, + false, + ) + } + + public(package) fun check_permission_consume( + s: &signer, threshold: u256, perm: PermKey + ): bool acquires PermissionStorage { + if (!is_permissioned_signer(s)) { + // master signer has all permissions + return true + }; + map_or( + s, + perm, + |stored_permission| { + consume_capacity(stored_permission, threshold) + }, + false, + ) + } + + public(package) fun capacity( + s: &signer, perm: PermKey + ): Option acquires PermissionStorage { + if (!is_permissioned_signer(s)) { + return option::some(U256_MAX) + }; + map_or( + s, + perm, + |stored_permission: &mut StoredPermission| { + option::some(match (stored_permission) { + StoredPermission::Capacity(capacity) => *capacity, + StoredPermission::Unlimited => U256_MAX, + }) + }, + option::none(), + ) + } + + public(package) fun revoke_permission( + permissioned: &signer, perm: PermKey + ) acquires PermissionStorage { + if (!is_permissioned_signer(permissioned)) { + // Master signer has no permissions associated with it. + return + }; + let addr = permission_address(permissioned); + if (!exists(addr)) { return }; + let perm_storage = &mut PermissionStorage[addr].perms; + let key = copyable_any::pack(perm); + if (perm_storage.contains(&key)) { + perm_storage.remove(&key); + } + }
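These `public(package)` entry points are meant to be wrapped by framework modules that own a module-private witness type, which is the pattern the stake and primary fungible store changes later in this diff follow. A hypothetical, untested sketch of such a wrapper (the module name, witness type, and error code are illustrative, and it would only compile inside the aptos-framework package because the wrapped calls are `public(package)`):

    module aptos_framework::example_quota {
        use std::error;
        use aptos_framework::permissioned_signer;

        /// Hypothetical error code for this sketch.
        const ENO_QUOTA_PERMISSION: u64 = 1;

        /// Witness key. Keeping its constructor private to this module is what stops
        /// other code from refilling the permission via `increase_limit`.
        struct QuotaPermission has copy, drop, store {}

        /// The master signer grants `permissioned` a spendable budget of `amount` units.
        public fun grant(master: &signer, permissioned: &signer, amount: u256) {
            permissioned_signer::authorize_increase(master, permissioned, amount, QuotaPermission {})
        }

        /// Deducts `amount` from the budget; a master (non-permissioned) signer always passes.
        public fun charge(s: &signer, amount: u256) {
            assert!(
                permissioned_signer::check_permission_consume(s, amount, QuotaPermission {}),
                error::permission_denied(ENO_QUOTA_PERMISSION),
            );
        }
    }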
+ + /// Unused function. Keeping it for compatibility purposes. + public fun address_of(_s: &signer): address { + abort error::permission_denied(EPERMISSION_SIGNER_DISABLED) + } + + /// Unused function. Keeping it for compatibility purposes. + public fun borrow_address(_s: &signer): &address { + abort error::permission_denied(EPERMISSION_SIGNER_DISABLED) + } + + // ===================================================================================================== + // Native Functions + /// + /// Check whether this is a permissioned signer. + native fun is_permissioned_signer_impl(s: &signer): bool; + /// Return the address used for storing permissions. Aborts if not a permissioned signer. + native fun permission_address(permissioned: &signer): address; + /// Creates a permissioned signer from an existing universal signer. The function aborts if the + /// given signer is already a permissioned signer. + /// + /// The implementation of this function requires extending the value representation for signers in the VM. + /// Invariants: + /// signer::address_of(master) == signer::address_of(signer_from_permissioned_handle(create_permissioned_handle(master))), + /// + native fun signer_from_permissioned_handle_impl( + master_account_addr: address, permissions_storage_addr: address + ): signer; + + #[test(creator = @0xcafe)] + fun signer_address_roundtrip( + creator: &signer + ) acquires PermissionStorage, GrantedPermissionHandles { + let aptos_framework = create_signer(@0x1); + timestamp::set_time_has_started_for_testing(&aptos_framework); + + let handle = create_permissioned_handle(creator); + let perm_signer = signer_from_permissioned_handle(&handle); + assert!(signer::address_of(&perm_signer) == signer::address_of(creator), 1); + assert!( + permission_address(&perm_signer) + == handle.permissions_storage_addr, + 1 + ); + assert!(exists(handle.permissions_storage_addr), 1); + + destroy_permissioned_handle(handle); + + let handle = create_storable_permissioned_handle(creator, 60); + let perm_signer = signer_from_storable_permissioned_handle(&handle); + assert!(signer::address_of(&perm_signer) == signer::address_of(creator), 1); + assert!( + permission_address(&perm_signer) + == handle.permissions_storage_addr, + 1 + ); + assert!(exists(handle.permissions_storage_addr), 1); + + destroy_storable_permissioned_handle(handle); + } + + #[test_only] + use aptos_std::bcs; + + #[test(creator = @0xcafe)] + #[expected_failure(abort_code = 0x1C5, location = aptos_std::bcs)] + fun signer_serialization( + creator: &signer + ) acquires PermissionStorage { + let aptos_framework = create_signer(@0x1); + timestamp::set_time_has_started_for_testing(&aptos_framework); + + let handle = create_permissioned_handle(creator); + let perm_signer = signer_from_permissioned_handle(&handle); + + assert!(bcs::to_bytes(creator) == bcs::to_bytes(&signer::address_of(creator)), 1); + bcs::to_bytes(&perm_signer); + + destroy_permissioned_handle(handle); + } +} diff --git a/aptos-move/framework/aptos-framework/sources/permissioned_signer.spec.move b/aptos-move/framework/aptos-framework/sources/permissioned_signer.spec.move new file mode 100644 index 0000000000000..32a3f7fb0395a --- /dev/null +++ b/aptos-move/framework/aptos-framework/sources/permissioned_signer.spec.move @@ -0,0 +1,172 @@ +spec aptos_framework::permissioned_signer { + + spec module { + pragma verify = true; + axiom forall a: GrantedPermissionHandles: + ( + forall i in 0..len(a.active_handles): + forall j in 0..len(a.active_handles): + i != j ==> + a.active_handles[i] != a.active_handles[j] + ); + } + + spec fun
spec_is_permissioned_signer_impl(s: signer): bool; + + spec is_permissioned_signer_impl(s: &signer): bool { + pragma opaque; + ensures [abstract] result == spec_is_permissioned_signer_impl(s); + } + + spec fun spec_is_permissioned_signer(s: signer): bool { + use std::features; + use std::features::PERMISSIONED_SIGNER; + if (!features::spec_is_enabled(PERMISSIONED_SIGNER)) { + false + } else { + spec_is_permissioned_signer_impl(s) + } + } + + spec is_permissioned_signer(s: &signer): bool { + pragma opaque; + aborts_if [abstract] false; + ensures [abstract] result == spec_is_permissioned_signer(s); + } + + spec fun spec_permission_address(s: signer): address; + + spec permission_address(permissioned: &signer): address { + pragma opaque; + aborts_if [abstract]!spec_is_permissioned_signer(permissioned); + ensures [abstract] result == spec_permission_address(permissioned); + } + + spec fun spec_signer_from_permissioned_handle_impl( + master_account_addr: address, permissions_storage_addr: address + ): signer; + + spec signer_from_permissioned_handle_impl( + master_account_addr: address, permissions_storage_addr: address + ): signer { + pragma opaque; + ensures [abstract] result + == spec_signer_from_permissioned_handle_impl( + master_account_addr, permissions_storage_addr + ); + } + + spec create_permissioned_handle(master: &signer): PermissionedHandle { + use aptos_framework::transaction_context; + pragma opaque; + aborts_if [abstract] spec_is_permissioned_signer(master); + let permissions_storage_addr = transaction_context::spec_generate_unique_address(); + modifies global(permissions_storage_addr); + let master_account_addr = signer::address_of(master); + ensures result.master_account_addr == master_account_addr; + ensures result.permissions_storage_addr == permissions_storage_addr; + } + + spec create_storable_permissioned_handle(master: &signer, expiration_time: u64): StorablePermissionedHandle { + use aptos_framework::transaction_context; + pragma opaque; + aborts_if [abstract] spec_is_permissioned_signer(master); + let permissions_storage_addr = transaction_context::spec_generate_unique_address(); + modifies global(permissions_storage_addr); + let master_account_addr = signer::address_of(master); + modifies global(master_account_addr); + ensures result.master_account_addr == master_account_addr; + ensures result.permissions_storage_addr == permissions_storage_addr; + ensures result.expiration_time == expiration_time; + ensures vector::spec_contains( + global(master_account_addr).active_handles, + permissions_storage_addr + ); + ensures exists(master_account_addr); + } + + spec destroy_permissioned_handle(p: PermissionedHandle) { + ensures !exists(p.permissions_storage_addr); + } + + spec destroy_storable_permissioned_handle(p: StorablePermissionedHandle) { + ensures !exists(p.permissions_storage_addr); + let post granted_permissions = global( + p.master_account_addr + ); + } + + spec revoke_permission_storage_address(s: &signer, permissions_storage_addr: address) { + // aborts_if spec_is_permissioned_signer(s); + } + + spec authorize_increase( + master: &signer, permissioned: &signer, capacity: u256, perm: PermKey + ) { + pragma aborts_if_is_partial; + aborts_if !spec_is_permissioned_signer(permissioned); + aborts_if spec_is_permissioned_signer(master); + aborts_if signer::address_of(permissioned) != signer::address_of(master); + ensures exists( + spec_permission_address(permissioned) + ); + } + + spec check_permission_exists(s: &signer, perm: PermKey): bool { + pragma opaque; + modifies 
global(spec_permission_address(s)); + ensures [abstract] result == spec_check_permission_exists(s, perm); + } + + spec fun spec_check_permission_exists(s: signer, perm: PermKey): bool; + + // TODO(teng): add this back later + // spec fun spec_check_permission_exists(s: signer, perm: PermKey): bool { + // use aptos_std::type_info; + // use std::bcs; + // let addr = spec_permission_address(s); + // let key = Any { + // type_name: type_info::type_name(), + // data: bcs::serialize(perm) + // }; + // if (!spec_is_permissioned_signer(s)) { true } + // else if (!exists(addr)) { false } + // else { + // // ordered_map::spec_contains_key(global(addr).perms, key) + // // FIXME: ordered map spec doesn't exist yet. + // true + // } + // } + + spec check_permission_capacity_above( + s: &signer, threshold: u256, perm: PermKey + ): bool { + modifies global(spec_permission_address(s)); + let permissioned_signer_addr = spec_permission_address(s); + ensures !spec_is_permissioned_signer(s) ==> result == true; + ensures ( + spec_is_permissioned_signer(s) + && !exists(permissioned_signer_addr) + ) ==> result == false; + } + + spec check_permission_consume( + s: &signer, threshold: u256, perm: PermKey + ): bool { + pragma opaque; + let permissioned_signer_addr = spec_permission_address(s); + modifies global(spec_permission_address(s)); + ensures [abstract] result == spec_check_permission_consume(s, threshold, perm); + } + + spec fun spec_check_permission_consume(s: signer, threshold: u256, perm: PermKey): bool; + + spec capacity(s: &signer, perm: PermKey): Option { + pragma opaque; + let permissioned_signer_addr = spec_permission_address(s); + modifies global(spec_permission_address(s)); + ensures [abstract] result == spec_capacity(s, perm); + } + + spec fun spec_capacity(s: signer, perm: PermKey): Option; +} diff --git a/aptos-move/framework/aptos-framework/sources/primary_fungible_store.move b/aptos-move/framework/aptos-framework/sources/primary_fungible_store.move index fcc6f95021f65..6891253cee494 100644 --- a/aptos-move/framework/aptos-framework/sources/primary_fungible_store.move +++ b/aptos-move/framework/aptos-framework/sources/primary_fungible_store.move @@ -15,12 +15,14 @@ module aptos_framework::primary_fungible_store { use aptos_framework::dispatchable_fungible_asset; use aptos_framework::fungible_asset::{Self, FungibleAsset, FungibleStore, Metadata, MintRef, TransferRef, BurnRef}; use aptos_framework::object::{Self, Object, ConstructorRef, DeriveRef}; - use aptos_framework::account; use std::option::Option; use std::signer; use std::string::String; + #[test_only] + use aptos_framework::permissioned_signer; + #[resource_group_member(group = aptos_framework::object::ObjectGroup)] /// A resource that holds the derive ref for the fungible asset metadata object. 
This is used to create primary /// stores for users with deterministic addresses so that users can easily deposit/withdraw/transfer fungible @@ -125,11 +127,38 @@ module aptos_framework::primary_fungible_store { fungible_asset::store_exists(primary_store_address_inlined(account, metadata)) } + public fun grant_permission( + master: &signer, + permissioned: &signer, + metadata: Object, + amount: u64 + ) { + fungible_asset::grant_permission_by_address( + master, + permissioned, + primary_store_address_inlined(signer::address_of(permissioned), metadata), + amount + ); + } + + public fun grant_apt_permission( + master: &signer, + permissioned: &signer, + amount: u64 + ) { + fungible_asset::grant_permission_by_address( + master, + permissioned, + object::create_user_derived_object_address(signer::address_of(permissioned), @aptos_fungible_asset), + amount + ); + } + #[view] /// Get the balance of `account`'s primary store. public fun balance(account: address, metadata: Object): u64 { if (primary_store_exists(account, metadata)) { - fungible_asset::balance(primary_store(account, metadata)) + dispatchable_fungible_asset::derived_balance(primary_store(account, metadata)) } else { 0 } @@ -138,7 +167,7 @@ module aptos_framework::primary_fungible_store { #[view] public fun is_balance_at_least(account: address, metadata: Object, amount: u64): bool { if (primary_store_exists(account, metadata)) { - fungible_asset::is_balance_at_least(primary_store(account, metadata), amount) + dispatchable_fungible_asset::is_derived_balance_at_least(primary_store(account, metadata), amount) } else { amount == 0 } @@ -169,11 +198,22 @@ module aptos_framework::primary_fungible_store { dispatchable_fungible_asset::deposit(store, fa); } - /// Deposit fungible asset `fa` to the given account's primary store. - public(friend) fun force_deposit(owner: address, fa: FungibleAsset) acquires DeriveRefPod { + /// Deposit fungible asset `fa` to the given account's primary store using signer. + /// + /// If `owner` is a permissioned signer, the signer will be granted with permission to withdraw + /// the same amount of fund in the future. + public fun deposit_with_signer(owner: &signer, fa: FungibleAsset) acquires DeriveRefPod { + fungible_asset::refill_permission( + owner, + fungible_asset::amount(&fa), + primary_store_address_inlined( + signer::address_of(owner), + fungible_asset::metadata_from_asset(&fa), + ) + ); let metadata = fungible_asset::asset_metadata(&fa); - let store = ensure_primary_store_exists(owner, metadata); - fungible_asset::deposit_internal(object::object_address(&store), fa); + let store = ensure_primary_store_exists(signer::address_of(owner), metadata); + dispatchable_fungible_asset::deposit(store, fa); } /// Transfer `amount` of fungible asset from sender's primary store to receiver's primary store. @@ -183,8 +223,6 @@ module aptos_framework::primary_fungible_store { recipient: address, amount: u64, ) acquires DeriveRefPod { - // Create account if it does not yet exist, otherwise funds may get stuck in new accounts. - account::create_account_if_does_not_exist(recipient); let sender_store = ensure_primary_store_exists(signer::address_of(sender), metadata); // Check if the sender store object has been burnt or not. If so, unburn it first. may_be_unburn(sender, sender_store); @@ -238,13 +276,13 @@ module aptos_framework::primary_fungible_store { fungible_asset::withdraw_with_ref(transfer_ref, from_primary_store, amount) } - /// Deposit from the primary store of `owner` ignoring frozen flag. 
+ /// Deposit to the primary store of `owner` ignoring frozen flag. public fun deposit_with_ref(transfer_ref: &TransferRef, owner: address, fa: FungibleAsset) acquires DeriveRefPod { - let from_primary_store = ensure_primary_store_exists( + let to_primary_store = ensure_primary_store_exists( owner, fungible_asset::transfer_ref_metadata(transfer_ref) ); - fungible_asset::deposit_with_ref(transfer_ref, from_primary_store, fa); + fungible_asset::deposit_with_ref(transfer_ref, to_primary_store, fa); } /// Transfer `amount` of FA from the primary store of `from` to that of `to` ignoring frozen flag. @@ -375,7 +413,7 @@ module aptos_framework::primary_fungible_store { // User 2 burns their primary store but should still be able to transfer afterward. let user_2_primary_store = primary_store(user_2_address, metadata); - object::burn(user_2, user_2_primary_store); + object::burn_object_with_transfer(user_2, user_2_primary_store); assert!(object::is_burnt(user_2_primary_store), 0); // Balance still works assert!(balance(user_2_address, metadata) == 80, 0); @@ -399,34 +437,48 @@ module aptos_framework::primary_fungible_store { // User 2 burns their primary store but should still be able to withdraw afterward. let user_2_primary_store = primary_store(user_2_address, metadata); - object::burn(user_2, user_2_primary_store); + object::burn_object_with_transfer(user_2, user_2_primary_store); assert!(object::is_burnt(user_2_primary_store), 0); let coins = withdraw(user_2, metadata, 70); assert!(balance(user_2_address, metadata) == 10, 0); deposit(user_2_address, coins); } - #[test(user_1 = @0xcafe, user_2 = @0xface)] - fun test_transfer_epilogue(user_1: &signer, user_2: &signer) acquires DeriveRefPod { - let (creator_ref, metadata) = create_test_token(user_1); - let (mint_ref, _, _) = init_test_metadata_with_primary_store_enabled(&creator_ref); - let user_1_address = signer::address_of(user_1); - let user_2_address = signer::address_of(user_2); + #[test(creator = @0xcafe, aaron = @0xface)] + fun test_permissioned_flow( + creator: &signer, + aaron: &signer, + ) acquires DeriveRefPod { + let (creator_ref, metadata) = create_test_token(creator); + let (mint_ref, _transfer_ref, _burn_ref) = init_test_metadata_with_primary_store_enabled(&creator_ref); + let creator_address = signer::address_of(creator); + let aaron_address = signer::address_of(aaron); + assert!(balance(creator_address, metadata) == 0, 1); + assert!(balance(aaron_address, metadata) == 0, 2); + mint(&mint_ref, creator_address, 100); + transfer(creator, metadata, aaron_address, 80); - // Mint 100 tokens to user_1 - mint(&mint_ref, user_1_address, 100); - assert!(balance(user_1_address, metadata) == 100, 1); - assert!(balance(user_2_address, metadata) == 0, 2); + let aaron_permission_handle = permissioned_signer::create_permissioned_handle(aaron); + let aaron_permission_signer = permissioned_signer::signer_from_permissioned_handle(&aaron_permission_handle); + grant_permission(aaron, &aaron_permission_signer, metadata, 10); - // First transfer: user_1 to user_2 - transfer(user_1, metadata, user_2_address, 50); - assert!(balance(user_1_address, metadata) == 50, 3); - assert!(balance(user_2_address, metadata) == 50, 4); + let fa = withdraw(&aaron_permission_signer, metadata, 10); + deposit(creator_address, fa); - // Second transfer: user_1 to user_2 - transfer(user_1, metadata, user_2_address, 30); - assert!(balance(user_1_address, metadata) == 20, 5); - assert!(balance(user_2_address, metadata) == 80, 6); - } + assert!(balance(creator_address, 
metadata) == 30, 3); + assert!(balance(aaron_address, metadata) == 70, 4); + + // Withdraw from creator and deposit back to aaron's account with permissioned signer. + let fa = withdraw(creator, metadata, 10); + deposit_with_signer(&aaron_permission_signer, fa); + // deposit_with_signer refills the permission, so it can now withdraw again. + let fa = withdraw(&aaron_permission_signer, metadata, 10); + deposit(creator_address, fa); + + assert!(balance(creator_address, metadata) == 30, 3); + assert!(balance(aaron_address, metadata) == 70, 4); + + permissioned_signer::destroy_permissioned_handle(aaron_permission_handle); + } } diff --git a/aptos-move/framework/aptos-framework/sources/randomness.move b/aptos-move/framework/aptos-framework/sources/randomness.move index e479b6e30c84b..cde8328e67795 100644 --- a/aptos-move/framework/aptos-framework/sources/randomness.move +++ b/aptos-move/framework/aptos-framework/sources/randomness.move @@ -92,7 +92,7 @@ module aptos_framework::randomness { let c = 0; while (c < n) { let blob = next_32_bytes(); - vector::append(&mut v, blob); + vector::reverse_append(&mut v, blob); c = c + 32; }; @@ -299,6 +299,8 @@ module aptos_framework::randomness { /// Generate a permutation of `[0, 1, ..., n-1]` uniformly at random. /// If n is 0, returns the empty vector. public fun permutation(n: u64): vector acquires PerBlockRandomness { + event::emit(RandomnessGeneratedEvent {}); + let values = vector[]; if(n == 0) { @@ -337,8 +339,6 @@ module aptos_framework::randomness { tail = tail - 1; }; - event::emit(RandomnessGeneratedEvent {}); - values } @@ -350,15 +350,15 @@ module aptos_framework::randomness { } /// Compute `(a + b) % m`, assuming `m >= 1, 0 <= a < m, 0<= b < m`. - inline fun safe_add_mod(a: u256, b: u256, m: u256): u256 { + fun safe_add_mod(a: u256, b: u256, m: u256): u256 { + let a_clone = a; let neg_b = m - b; - if (a < neg_b) { - a + b - } else { - a - neg_b - } + let a_less = a < neg_b; + take_first(if (a_less) { a + b } else { a_clone - neg_b }, if (!a_less) { a_clone - neg_b } else { a + b }) } + + fun take_first(x: u256, _y: u256 ): u256 { x } + #[verify_only] fun safe_add_mod_for_verification(a: u256, b: u256, m: u256): u256 { let neg_b = m - b; diff --git a/aptos-move/framework/aptos-framework/sources/reconfiguration.move b/aptos-move/framework/aptos-framework/sources/reconfiguration.move index 04a48b6466e75..6bf02a1d1b0f5 100644 --- a/aptos-move/framework/aptos-framework/sources/reconfiguration.move +++ b/aptos-move/framework/aptos-framework/sources/reconfiguration.move @@ -13,7 +13,6 @@ module aptos_framework::reconfiguration { use aptos_framework::chain_status; use aptos_framework::reconfiguration_state; use aptos_framework::storage_gas; - use aptos_framework::transaction_fee; friend aptos_framework::aptos_governance; friend aptos_framework::block; @@ -131,20 +130,6 @@ module aptos_framework::reconfiguration { reconfiguration_state::on_reconfig_start(); - // Reconfiguration "forces the block" to end, as mentioned above. Therefore, we must process the collected fees - // explicitly so that staking can distribute them. - // - // This also handles the case when a validator is removed due to the governance proposal. In particular, removing - the validator causes a reconfiguration. We explicitly process fees, i.e. we drain aggregatable coin and populate - the fees table, prior to calling `on_new_epoch()`.
That call, in turn, distributes transaction fees for all active - // and pending_inactive validators, which include any validator that is to be removed. - if (features::collect_and_distribute_gas_fees()) { - // All transactions after reconfiguration are Retry. Therefore, when the next - // block starts and tries to assign/burn collected fees it will be just 0 and - // nothing will be assigned. - transaction_fee::process_collected_fees(); - }; - // Call stake to compute the new validator set and distribute rewards and transaction fees. stake::on_new_epoch(); storage_gas::on_reconfig(); diff --git a/aptos-move/framework/aptos-framework/sources/reconfiguration.spec.move b/aptos-move/framework/aptos-framework/sources/reconfiguration.spec.move index 4bea1460fdb54..78aae7ea28d0d 100644 --- a/aptos-move/framework/aptos-framework/sources/reconfiguration.spec.move +++ b/aptos-move/framework/aptos-framework/sources/reconfiguration.spec.move @@ -127,19 +127,16 @@ spec aptos_framework::reconfiguration { spec reconfigure { use aptos_framework::aptos_coin; - use aptos_framework::transaction_fee; use aptos_framework::staking_config; // TODO: set because of timeout (property proved) pragma verify = true; pragma verify_duration_estimate = 600; - requires exists(@aptos_framework); let success = !(chain_status::is_genesis() || timestamp::spec_now_microseconds() == 0 || !reconfiguration_enabled()) && timestamp::spec_now_microseconds() != global(@aptos_framework).last_reconfiguration_time; include features::spec_periodical_reward_rate_decrease_enabled() ==> staking_config::StakingRewardsConfigEnabledRequirement; include success ==> aptos_coin::ExistsAptosCoin; - include transaction_fee::RequiresCollectedFeesPerValueLeqBlockAptosSupply; aborts_if false; // The ensure conditions of the reconfigure function are not fully written, because there is a new cycle in it, // but its existing ensure conditions satisfy hp. 
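Returning briefly to the `safe_add_mod` rework in randomness.move above: under the documented preconditions `m >= 1, 0 <= a < m, 0 <= b < m`, comparing `a` against `m - b` decides whether the sum wraps past `m`, so the branchy rewrite should still agree with the original `(a + b) % m`. A hypothetical test-only spot check (the function name and abort codes are illustrative):

    #[test_only]
    fun safe_add_mod_reference_cases() {
        // a < m - b: no wrap-around, result is a + b.
        assert!(safe_add_mod(3, 4, 10) == 7, 0);
        // a >= m - b: wraps, result is a - (m - b) == (a + b) - m.
        assert!(safe_add_mod(7, 5, 10) == 2, 0);
        // Boundary: b == 0 leaves a unchanged.
        assert!(safe_add_mod(9, 0, 10) == 9, 0);
    }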
diff --git a/aptos-move/framework/aptos-framework/sources/reconfiguration_state.spec.move b/aptos-move/framework/aptos-framework/sources/reconfiguration_state.spec.move index 799669225fe96..376fa9e1999cf 100644 --- a/aptos-move/framework/aptos-framework/sources/reconfiguration_state.spec.move +++ b/aptos-move/framework/aptos-framework/sources/reconfiguration_state.spec.move @@ -100,7 +100,7 @@ spec aptos_framework::reconfiguration_state { include copyable_any::type_name(global(@aptos_framework).variant).bytes == b"0x1::reconfiguration_state::StateActive" ==> copyable_any::UnpackAbortsIf { - x: global(@aptos_framework).variant + self: global(@aptos_framework).variant }; aborts_if copyable_any::type_name(global(@aptos_framework).variant).bytes != b"0x1::reconfiguration_state::StateActive"; diff --git a/aptos-move/framework/aptos-framework/sources/reconfiguration_with_dkg.spec.move b/aptos-move/framework/aptos-framework/sources/reconfiguration_with_dkg.spec.move index cddde359d5a9f..6621ee8ad2872 100644 --- a/aptos-move/framework/aptos-framework/sources/reconfiguration_with_dkg.spec.move +++ b/aptos-move/framework/aptos-framework/sources/reconfiguration_with_dkg.spec.move @@ -28,10 +28,8 @@ spec aptos_framework::reconfiguration_with_dkg { use aptos_framework::chain_status; use std::signer; use std::features; - use aptos_framework::stake; use aptos_framework::coin::CoinInfo; use aptos_framework::aptos_coin::AptosCoin; - use aptos_framework::transaction_fee; use aptos_framework::staking_config; use aptos_framework::config_buffer; use aptos_framework::version; @@ -46,8 +44,6 @@ spec aptos_framework::reconfiguration_with_dkg { requires chain_status::is_operating(); requires exists>(@aptos_framework); include staking_config::StakingRewardsConfigRequirement; - requires exists(@aptos_framework); - include transaction_fee::RequiresCollectedFeesPerValueLeqBlockAptosSupply; requires exists(@std); include config_buffer::OnNewEpochRequirement; include config_buffer::OnNewEpochRequirement; diff --git a/aptos-move/framework/aptos-framework/sources/resource_account.move b/aptos-move/framework/aptos-framework/sources/resource_account.move index 26ee8123ea0ac..22aaa2ad2f07e 100644 --- a/aptos-move/framework/aptos-framework/sources/resource_account.move +++ b/aptos-move/framework/aptos-framework/sources/resource_account.move @@ -181,7 +181,7 @@ module aptos_framework::resource_account { }; if (empty_container) { - let container = move_from(source_addr); + let container = move_from(source_addr); let Container { store } = container; simple_map::destroy_empty(store); }; @@ -250,6 +250,8 @@ module aptos_framework::resource_account { #[test(framework = @0x1, user = @0x2345)] #[expected_failure(abort_code = 0x60005, location = aptos_framework::coin)] public entry fun without_coin(framework: signer, user: signer) acquires Container { + let fa_features = vector[std::features::get_new_accounts_default_to_fa_store_feature(), std::features::get_new_accounts_default_to_fa_apt_store_feature(), std::features::get_operations_default_to_fa_apt_store_feature()]; + std::features::change_feature_flags_for_testing(&framework, vector[], fa_features); let user_addr = signer::address_of(&user); let (burn, mint) = aptos_framework::aptos_coin::initialize_for_test(&framework); aptos_framework::aptos_account::create_account(user_addr); diff --git a/aptos-move/framework/aptos-framework/sources/resource_account.spec.move b/aptos-move/framework/aptos-framework/sources/resource_account.spec.move index a6f4d6848a50a..af555386060b6 100644 --- 
a/aptos-move/framework/aptos-framework/sources/resource_account.spec.move +++ b/aptos-move/framework/aptos-framework/sources/resource_account.spec.move @@ -59,8 +59,8 @@ spec aptos_framework::resource_account { /// /// spec module { - pragma verify = true; - pragma aborts_if_is_strict; + pragma verify = false; + pragma aborts_if_is_partial; } spec create_resource_account( @@ -68,8 +68,10 @@ spec aptos_framework::resource_account { seed: vector, optional_auth_key: vector, ) { + use aptos_framework::create_signer; let source_addr = signer::address_of(origin); let resource_addr = account::spec_create_resource_address(source_addr, seed); + let resource = create_signer::spec_create_signer(resource_addr); include RotateAccountAuthenticationKeyAndStoreCapabilityAbortsIfWithoutAccountLimit; } @@ -90,8 +92,8 @@ spec aptos_framework::resource_account { include aptos_account::GuidAbortsIf{to: resource_addr}; include RotateAccountAuthenticationKeyAndStoreCapabilityAbortsIfWithoutAccountLimit; - //coin property - aborts_if coin::spec_is_account_registered(resource_addr) && coin_store_resource.frozen; + // TODO(fa_migration) + //aborts_if !coin::spec_is_account_registered(resource_addr) && coin_store_resource.frozen; /// [high-level-req-3] ensures exists>(resource_addr); } @@ -116,6 +118,8 @@ spec aptos_framework::resource_account { resource_signer_cap: account::SignerCapability, optional_auth_key: vector, ) { + pragma aborts_if_is_partial; + let resource_addr = signer::address_of(resource); /// [high-level-req-1] include RotateAccountAuthenticationKeyAndStoreCapabilityAbortsIf; @@ -155,11 +159,9 @@ spec aptos_framework::resource_account { let get = len(optional_auth_key) == 0; let account = global(source_addr); - requires source_addr != resource_addr; - aborts_if len(ZERO_AUTH_KEY) != 32; - include account::exists_at(resource_addr) ==> account::CreateResourceAccountAbortsIf; - include !account::exists_at(resource_addr) ==> account::CreateAccountAbortsIf {addr: resource_addr}; + include account::spec_exists_at(resource_addr) ==> account::CreateResourceAccountAbortsIf; + include !account::spec_exists_at(resource_addr) ==> account::CreateAccountAbortsIf {addr: resource_addr}; aborts_if get && !exists(source_addr); aborts_if exists(source_addr) && simple_map::spec_contains_key(container.store, resource_addr); @@ -174,6 +176,8 @@ spec aptos_framework::resource_account { resource: &signer, source_addr: address, ) : account::SignerCapability { + pragma aborts_if_is_partial; + /// [high-level-req-6] aborts_if !exists(source_addr); let resource_addr = signer::address_of(resource); diff --git a/aptos-move/framework/aptos-framework/sources/stake.move b/aptos-move/framework/aptos-framework/sources/stake.move index 9639ffa8ff07d..ab156ceff5af4 100644 --- a/aptos-move/framework/aptos-framework/sources/stake.move +++ b/aptos-move/framework/aptos-framework/sources/stake.move @@ -25,7 +25,7 @@ module aptos_framework::stake { use std::vector; use aptos_std::bls12381; use aptos_std::math64::min; - use aptos_std::table::{Self, Table}; + use aptos_std::table::Table; use aptos_framework::aptos_coin::AptosCoin; use aptos_framework::account; use aptos_framework::coin::{Self, Coin, MintCapability}; @@ -34,6 +34,7 @@ module aptos_framework::stake { use aptos_framework::system_addresses; use aptos_framework::staking_config::{Self, StakingConfig, StakingRewardsConfig}; use aptos_framework::chain_status; + use aptos_framework::permissioned_signer; friend aptos_framework::block; friend aptos_framework::genesis; @@ -79,8 +80,10 @@ 
module aptos_framework::stake { const EINVALID_LOCKUP: u64 = 18; /// Table to store collected transaction fees for each validator already exists. const EFEES_TABLE_ALREADY_EXISTS: u64 = 19; - /// Validator set change temporarily disabled because of in-progress reconfiguration. + /// Validator set change temporarily disabled because of in-progress reconfiguration. Please retry after 1 minute. const ERECONFIGURATION_IN_PROGRESS: u64 = 20; + /// Signer does not have permission to perform stake logic. + const ENO_STAKE_PERMISSION: u64 = 28; /// Validator status enum. We can switch to proper enum later once Move supports it. const VALIDATOR_STATUS_PENDING_ACTIVE: u64 = 1; @@ -204,6 +207,8 @@ module aptos_framework::stake { pool_address: address, } + struct StakeManagementPermission has copy, drop, store {} + #[event] struct RegisterValidatorCandidate has drop, store { pool_address: address, @@ -338,32 +343,23 @@ module aptos_framework::stake { pool_address: address, } - /// Stores transaction fees assigned to validators. All fees are distributed to validators - /// at the end of the epoch. + #[deprecated] + /// DEPRECATED struct ValidatorFees has key { fees_table: Table>, } - /// Initializes the resource storing information about collected transaction fees per validator. - /// Used by `transaction_fee.move` to initialize fee collection and distribution. - public(friend) fun initialize_validator_fees(aptos_framework: &signer) { - system_addresses::assert_aptos_framework(aptos_framework); + /// Permissions + inline fun check_stake_permission(s: &signer) { assert!( - !exists(@aptos_framework), - error::already_exists(EFEES_TABLE_ALREADY_EXISTS) + permissioned_signer::check_permission_exists(s, StakeManagementPermission {}), + error::permission_denied(ENO_STAKE_PERMISSION), ); - move_to(aptos_framework, ValidatorFees { fees_table: table::new() }); } - /// Stores the transaction fee collected to the specified validator address. - public(friend) fun add_transaction_fee(validator_addr: address, fee: Coin) acquires ValidatorFees { - let fees_table = &mut borrow_global_mut(@aptos_framework).fees_table; - if (table::contains(fees_table, validator_addr)) { - let collected_fee = table::borrow_mut(fees_table, validator_addr); - coin::merge(collected_fee, fee); - } else { - table::add(fees_table, validator_addr, fee); - } + /// Grant permission to mutate staking on behalf of the master signer. + public fun grant_permission(master: &signer, permissioned_signer: &signer) { + permissioned_signer::authorize_unlimited(master, permissioned_signer, StakeManagementPermission {}) } #[view] @@ -558,6 +554,7 @@ module aptos_framework::stake { operator: address, voter: address, ) acquires AllowedValidators, OwnerCapability, StakePool, ValidatorSet { + check_stake_permission(owner); initialize_owner(owner); move_to(owner, ValidatorConfig { consensus_pubkey: vector::empty(), @@ -587,8 +584,9 @@ module aptos_framework::stake { network_addresses: vector, fullnode_addresses: vector, ) acquires AllowedValidators { + check_stake_permission(account); // Checks the public key has a valid proof-of-possession to prevent rogue-key attacks. 
- let pubkey_from_pop = &mut bls12381::public_key_from_bytes_with_pop( + let pubkey_from_pop = &bls12381::public_key_from_bytes_with_pop( consensus_pubkey, &proof_of_possession_from_bytes(proof_of_possession) ); @@ -604,6 +602,7 @@ module aptos_framework::stake { } fun initialize_owner(owner: &signer) acquires AllowedValidators { + check_stake_permission(owner); let owner_address = signer::address_of(owner); assert!(is_allowed(owner_address), error::not_found(EINELIGIBLE_VALIDATOR)); assert!(!stake_pool_exists(owner_address), error::already_exists(EALREADY_REGISTERED)); @@ -638,6 +637,7 @@ module aptos_framework::stake { /// Extract and return owner capability from the signing account. public fun extract_owner_cap(owner: &signer): OwnerCapability acquires OwnerCapability { + check_stake_permission(owner); let owner_address = signer::address_of(owner); assert_owner_cap_exists(owner_address); move_from(owner_address) @@ -646,6 +646,7 @@ module aptos_framework::stake { /// Deposit `owner_cap` into `account`. This requires `account` to not already have ownership of another /// staking pool. public fun deposit_owner_cap(owner: &signer, owner_cap: OwnerCapability) { + check_stake_permission(owner); assert!(!exists(signer::address_of(owner)), error::not_found(EOWNER_CAP_ALREADY_EXISTS)); move_to(owner, owner_cap); } @@ -657,6 +658,7 @@ module aptos_framework::stake { /// Allows an owner to change the operator of the stake pool. public entry fun set_operator(owner: &signer, new_operator: address) acquires OwnerCapability, StakePool { + check_stake_permission(owner); let owner_address = signer::address_of(owner); assert_owner_cap_exists(owner_address); let ownership_cap = borrow_global(owner_address); @@ -679,20 +681,21 @@ module aptos_framework::stake { new_operator, }, ); + } else { + event::emit_event( + &mut stake_pool.set_operator_events, + SetOperatorEvent { + pool_address, + old_operator, + new_operator, + }, + ); }; - - event::emit_event( - &mut stake_pool.set_operator_events, - SetOperatorEvent { - pool_address, - old_operator, - new_operator, - }, - ); } /// Allows an owner to change the delegated voter of the stake pool. public entry fun set_delegated_voter(owner: &signer, new_voter: address) acquires OwnerCapability, StakePool { + check_stake_permission(owner); let owner_address = signer::address_of(owner); assert_owner_cap_exists(owner_address); let ownership_cap = borrow_global(owner_address); @@ -709,6 +712,7 @@ module aptos_framework::stake { /// Add `amount` of coins from the `account` owning the StakePool. public entry fun add_stake(owner: &signer, amount: u64) acquires OwnerCapability, StakePool, ValidatorSet { + check_stake_permission(owner); let owner_address = signer::address_of(owner); assert_owner_cap_exists(owner_address); let ownership_cap = borrow_global(owner_address); @@ -730,7 +734,7 @@ module aptos_framework::stake { // Only track and validate voting power increase for active and pending_active validator. // Pending_inactive validator will be removed from the validator set in the next epoch. // Inactive validator's total stake will be tracked when they join the validator set. - let validator_set = borrow_global_mut(@aptos_framework); + let validator_set = borrow_global(@aptos_framework); // Search directly rather using get_validator_state to save on unnecessary loops. 
if (option::is_some(&find_validator(&validator_set.active_validators, pool_address)) || option::is_some(&find_validator(&validator_set.pending_active, pool_address))) { @@ -757,18 +761,20 @@ module aptos_framework::stake { amount_added: amount, }, ); + } else { + event::emit_event( + &mut stake_pool.add_stake_events, + AddStakeEvent { + pool_address, + amount_added: amount, + }, + ); }; - event::emit_event( - &mut stake_pool.add_stake_events, - AddStakeEvent { - pool_address, - amount_added: amount, - }, - ); } /// Move `amount` of coins from pending_inactive to active. public entry fun reactivate_stake(owner: &signer, amount: u64) acquires OwnerCapability, StakePool { + check_stake_permission(owner); assert_reconfig_not_in_progress(); let owner_address = signer::address_of(owner); assert_owner_cap_exists(owner_address); @@ -799,14 +805,15 @@ module aptos_framework::stake { amount, }, ); + } else { + event::emit_event( + &mut stake_pool.reactivate_stake_events, + ReactivateStakeEvent { + pool_address, + amount, + }, + ); }; - event::emit_event( - &mut stake_pool.reactivate_stake_events, - ReactivateStakeEvent { - pool_address, - amount, - }, - ); } /// Rotate the consensus key of the validator, it'll take effect in next epoch. @@ -816,6 +823,7 @@ module aptos_framework::stake { new_consensus_pubkey: vector, proof_of_possession: vector, ) acquires StakePool, ValidatorConfig { + check_stake_permission(operator); assert_reconfig_not_in_progress(); assert_stake_pool_exists(pool_address); @@ -826,7 +834,7 @@ module aptos_framework::stake { let validator_info = borrow_global_mut(pool_address); let old_consensus_pubkey = validator_info.consensus_pubkey; // Checks the public key has a valid proof-of-possession to prevent rogue-key attacks. - let pubkey_from_pop = &mut bls12381::public_key_from_bytes_with_pop( + let pubkey_from_pop = &bls12381::public_key_from_bytes_with_pop( new_consensus_pubkey, &proof_of_possession_from_bytes(proof_of_possession) ); @@ -841,15 +849,16 @@ module aptos_framework::stake { new_consensus_pubkey, }, ); + } else { + event::emit_event( + &mut stake_pool.rotate_consensus_key_events, + RotateConsensusKeyEvent { + pool_address, + old_consensus_pubkey, + new_consensus_pubkey, + }, + ); }; - event::emit_event( - &mut stake_pool.rotate_consensus_key_events, - RotateConsensusKeyEvent { - pool_address, - old_consensus_pubkey, - new_consensus_pubkey, - }, - ); } /// Update the network and full node addresses of the validator. This only takes effect in the next epoch. @@ -859,6 +868,7 @@ module aptos_framework::stake { new_network_addresses: vector, new_fullnode_addresses: vector, ) acquires StakePool, ValidatorConfig { + check_stake_permission(operator); assert_reconfig_not_in_progress(); assert_stake_pool_exists(pool_address); let stake_pool = borrow_global_mut(pool_address); @@ -880,22 +890,23 @@ module aptos_framework::stake { new_fullnode_addresses, }, ); + } else { + event::emit_event( + &mut stake_pool.update_network_and_fullnode_addresses_events, + UpdateNetworkAndFullnodeAddressesEvent { + pool_address, + old_network_addresses, + new_network_addresses, + old_fullnode_addresses, + new_fullnode_addresses, + }, + ); }; - event::emit_event( - &mut stake_pool.update_network_and_fullnode_addresses_events, - UpdateNetworkAndFullnodeAddressesEvent { - pool_address, - old_network_addresses, - new_network_addresses, - old_fullnode_addresses, - new_fullnode_addresses, - }, - ); - } /// Similar to increase_lockup_with_cap but will use ownership capability from the signing account. 
public entry fun increase_lockup(owner: &signer) acquires OwnerCapability, StakePool { + check_stake_permission(owner); let owner_address = signer::address_of(owner); assert_owner_cap_exists(owner_address); let ownership_cap = borrow_global(owner_address); @@ -923,15 +934,16 @@ module aptos_framework::stake { new_locked_until_secs, }, ); - }; - event::emit_event( - &mut stake_pool.increase_lockup_events, - IncreaseLockupEvent { - pool_address, - old_locked_until_secs, - new_locked_until_secs, - }, - ); + } else { + event::emit_event( + &mut stake_pool.increase_lockup_events, + IncreaseLockupEvent { + pool_address, + old_locked_until_secs, + new_locked_until_secs, + }, + ); + } } /// This can only called by the operator of the validator/staking pool. @@ -939,6 +951,7 @@ module aptos_framework::stake { operator: &signer, pool_address: address ) acquires StakePool, ValidatorConfig, ValidatorSet { + check_stake_permission(operator); assert!( staking_config::get_allow_validator_set_change(&staking_config::get()), error::invalid_argument(ENO_POST_GENESIS_VALIDATOR_SET_CHANGE_ALLOWED), @@ -976,7 +989,7 @@ module aptos_framework::stake { update_voting_power_increase(voting_power); // Add validator to pending_active, to be activated in the next epoch. - let validator_config = borrow_global_mut(pool_address); + let validator_config = borrow_global(pool_address); assert!(!vector::is_empty(&validator_config.consensus_pubkey), error::invalid_argument(EINVALID_PUBLIC_KEY)); // Validate the current validator set size has not exceeded the limit. @@ -992,15 +1005,17 @@ module aptos_framework::stake { if (std::features::module_event_migration_enabled()) { event::emit(JoinValidatorSet { pool_address }); - }; - event::emit_event( - &mut stake_pool.join_validator_set_events, - JoinValidatorSetEvent { pool_address }, - ); + } else { + event::emit_event( + &mut stake_pool.join_validator_set_events, + JoinValidatorSetEvent { pool_address }, + ); + } } /// Similar to unlock_with_cap but will use ownership capability from the signing account. public entry fun unlock(owner: &signer, amount: u64) acquires OwnerCapability, StakePool { + check_stake_permission(owner); assert_reconfig_not_in_progress(); let owner_address = signer::address_of(owner); assert_owner_cap_exists(owner_address); @@ -1033,14 +1048,15 @@ module aptos_framework::stake { amount_unlocked: amount, }, ); + } else { + event::emit_event( + &mut stake_pool.unlock_stake_events, + UnlockStakeEvent { + pool_address, + amount_unlocked: amount, + }, + ); }; - event::emit_event( - &mut stake_pool.unlock_stake_events, - UnlockStakeEvent { - pool_address, - amount_unlocked: amount, - }, - ); } /// Withdraw from `account`'s inactive stake. 
@@ -1048,6 +1064,7 @@ module aptos_framework::stake { owner: &signer, withdraw_amount: u64 ) acquires OwnerCapability, StakePool, ValidatorSet { + check_stake_permission(owner); let owner_address = signer::address_of(owner); assert_owner_cap_exists(owner_address); let ownership_cap = borrow_global(owner_address); @@ -1084,14 +1101,15 @@ module aptos_framework::stake { amount_withdrawn: withdraw_amount, }, ); + } else { + event::emit_event( + &mut stake_pool.withdraw_stake_events, + WithdrawStakeEvent { + pool_address, + amount_withdrawn: withdraw_amount, + }, + ); }; - event::emit_event( - &mut stake_pool.withdraw_stake_events, - WithdrawStakeEvent { - pool_address, - amount_withdrawn: withdraw_amount, - }, - ); coin::extract(&mut stake_pool.inactive, withdraw_amount) } @@ -1106,6 +1124,7 @@ module aptos_framework::stake { operator: &signer, pool_address: address ) acquires StakePool, ValidatorSet { + check_stake_permission(operator); assert_reconfig_not_in_progress(); let config = staking_config::get(); assert!( @@ -1148,13 +1167,14 @@ module aptos_framework::stake { if (std::features::module_event_migration_enabled()) { event::emit(LeaveValidatorSet { pool_address }); + } else { + event::emit_event( + &mut stake_pool.leave_validator_set_events, + LeaveValidatorSetEvent { + pool_address, + }, + ); }; - event::emit_event( - &mut stake_pool.leave_validator_set_events, - LeaveValidatorSetEvent { - pool_address, - }, - ); }; } @@ -1232,7 +1252,7 @@ module aptos_framework::stake { /// 4. The validator's voting power in the validator set is updated to be the corresponding staking pool's voting /// power. public(friend) fun on_new_epoch( - ) acquires StakePool, AptosCoinCapabilities, ValidatorConfig, ValidatorPerformance, ValidatorSet, ValidatorFees { + ) acquires StakePool, AptosCoinCapabilities, ValidatorConfig, ValidatorPerformance, ValidatorSet { let validator_set = borrow_global_mut(@aptos_framework); let config = staking_config::get(); let validator_perf = borrow_global_mut(@aptos_framework); @@ -1273,8 +1293,8 @@ module aptos_framework::stake { }) { let old_validator_info = vector::borrow_mut(&mut validator_set.active_validators, i); let pool_address = old_validator_info.addr; - let validator_config = borrow_global_mut(pool_address); - let stake_pool = borrow_global_mut(pool_address); + let validator_config = borrow_global(pool_address); + let stake_pool = borrow_global(pool_address); let new_validator_info = generate_validator_info(pool_address, stake_pool, *validator_config); // A validator needs at least the min stake required to join the validator set. @@ -1354,7 +1374,7 @@ module aptos_framework::stake { } - public fun next_validator_consensus_infos(): vector acquires ValidatorSet, ValidatorPerformance, StakePool, ValidatorFees, ValidatorConfig { + public fun next_validator_consensus_infos(): vector acquires ValidatorSet, ValidatorPerformance, StakePool, ValidatorConfig { // Init. 
let cur_validator_set = borrow_global(@aptos_framework); let staking_config = staking_config::get(); @@ -1409,25 +1429,16 @@ module aptos_framework::stake { 0 }; - let cur_fee = 0; - if (features::collect_and_distribute_gas_fees()) { - let fees_table = &borrow_global(@aptos_framework).fees_table; - if (table::contains(fees_table, candidate.addr)) { - let fee_coin = table::borrow(fees_table, candidate.addr); - cur_fee = coin::value(fee_coin); - } - }; - let lockup_expired = get_reconfig_start_time_secs() >= stake_pool.locked_until_secs; spec { - assume cur_active + cur_pending_active + cur_reward + cur_fee <= MAX_U64; - assume cur_active + cur_pending_inactive + cur_pending_active + cur_reward + cur_fee <= MAX_U64; + assume cur_active + cur_pending_active + cur_reward <= MAX_U64; + assume cur_active + cur_pending_inactive + cur_pending_active + cur_reward <= MAX_U64; }; let new_voting_power = cur_active + if (lockup_expired) { 0 } else { cur_pending_inactive } + cur_pending_active - + cur_reward + cur_fee; + + cur_reward; if (new_voting_power >= minimum_stake) { let config = *borrow_global(candidate.addr); @@ -1546,7 +1557,7 @@ module aptos_framework::stake { validator_perf: &ValidatorPerformance, pool_address: address, staking_config: &StakingConfig, - ) acquires StakePool, AptosCoinCapabilities, ValidatorConfig, ValidatorFees { + ) acquires StakePool, AptosCoinCapabilities, ValidatorConfig { let stake_pool = borrow_global_mut(pool_address); let validator_config = borrow_global(pool_address); let cur_validator_perf = vector::borrow(&validator_perf.validators, validator_config.validator_index); @@ -1579,15 +1590,6 @@ module aptos_framework::stake { // Pending active stake can now be active. coin::merge(&mut stake_pool.active, coin::extract_all(&mut stake_pool.pending_active)); - // Additionally, distribute transaction fees. - if (features::collect_and_distribute_gas_fees()) { - let fees_table = &mut borrow_global_mut(@aptos_framework).fees_table; - if (table::contains(fees_table, pool_address)) { - let coin = table::remove(fees_table, pool_address); - coin::merge(&mut stake_pool.active, coin); - }; - }; - // Pending inactive stake is only fully unlocked and moved into inactive if the current lockup cycle has expired let current_lockup_expiration = stake_pool.locked_until_secs; if (get_reconfig_start_time_secs() >= current_lockup_expiration) { @@ -1599,14 +1601,15 @@ module aptos_framework::stake { if (std::features::module_event_migration_enabled()) { event::emit(DistributeRewards { pool_address, rewards_amount }); + } else { + event::emit_event( + &mut stake_pool.distribute_rewards_events, + DistributeRewardsEvent { + pool_address, + rewards_amount, + }, + ); }; - event::emit_event( - &mut stake_pool.distribute_rewards_events, - DistributeRewardsEvent { - pool_address, - rewards_amount, - }, - ); } /// Assuming we are in a middle of a reconfiguration (no matter it is immediate or async), get its start time. @@ -1791,6 +1794,8 @@ module aptos_framework::stake { public fun initialize_for_test(aptos_framework: &signer) { reconfiguration_state::initialize(aptos_framework); initialize_for_test_custom(aptos_framework, 100, 10000, LOCKUP_CYCLE_SECONDS, true, 1, 100, 1000000); + // In the test environment, the periodical_reward_rate_decrease feature is initially turned off. 
+ features::change_feature_flags_for_testing(aptos_framework, vector[], vector[features::get_periodical_reward_rate_decrease_feature()]); } #[test_only] @@ -1800,7 +1805,7 @@ module aptos_framework::stake { operator: &signer, pool_address: address, should_end_epoch: bool, - ) acquires AptosCoinCapabilities, StakePool, ValidatorConfig, ValidatorPerformance, ValidatorSet, ValidatorFees { + ) acquires AptosCoinCapabilities, StakePool, ValidatorConfig, ValidatorPerformance, ValidatorSet { let pk_bytes = bls12381::public_key_to_bytes(pk); let pop_bytes = bls12381::proof_of_possession_to_bytes(pop); rotate_consensus_key(operator, pool_address, pk_bytes, pop_bytes); @@ -1812,7 +1817,7 @@ module aptos_framework::stake { #[test_only] public fun fast_forward_to_unlock(pool_address: address) - acquires AptosCoinCapabilities, StakePool, ValidatorConfig, ValidatorPerformance, ValidatorSet, ValidatorFees { + acquires AptosCoinCapabilities, StakePool, ValidatorConfig, ValidatorPerformance, ValidatorSet { let expiration_time = get_lockup_secs(pool_address); timestamp::update_global_time_for_test_secs(expiration_time); end_epoch(); @@ -1851,6 +1856,9 @@ module aptos_framework::stake { store_aptos_coin_mint_cap(aptos_framework, mint_cap); coin::destroy_burn_cap(burn_cap); }; + + // In the test environment, the periodical_reward_rate_decrease feature is initially turned off. + features::change_feature_flags_for_testing(aptos_framework, vector[], vector[features::get_periodical_reward_rate_decrease_feature()]); } // This function assumes the stake module already the capability to mint aptos coins. @@ -1881,11 +1889,9 @@ module aptos_framework::stake { amount: u64, should_join_validator_set: bool, should_end_epoch: bool, - ) acquires AllowedValidators, AptosCoinCapabilities, OwnerCapability, StakePool, ValidatorConfig, ValidatorPerformance, ValidatorSet, ValidatorFees { + ) acquires AllowedValidators, AptosCoinCapabilities, OwnerCapability, StakePool, ValidatorConfig, ValidatorPerformance, ValidatorSet { let validator_address = signer::address_of(validator); - if (!account::exists_at(signer::address_of(validator))) { - account::create_account_for_test(validator_address); - }; + account::create_account_for_test(validator_address); let pk_bytes = bls12381::public_key_to_bytes(public_key); let pop_bytes = bls12381::proof_of_possession_to_bytes(proof_of_possession); @@ -1977,7 +1983,7 @@ module aptos_framework::stake { public entry fun test_inactive_validator_can_add_stake_if_exceeding_max_allowed( aptos_framework: &signer, validator: &signer, - ) acquires AllowedValidators, AptosCoinCapabilities, OwnerCapability, StakePool, ValidatorConfig, ValidatorPerformance, ValidatorSet, ValidatorFees { + ) acquires AllowedValidators, AptosCoinCapabilities, OwnerCapability, StakePool, ValidatorConfig, ValidatorPerformance, ValidatorSet { initialize_for_test(aptos_framework); let (_sk, pk, pop) = generate_identity(); initialize_test_validator(&pk, &pop, validator, 100, false, false); @@ -1992,7 +1998,7 @@ module aptos_framework::stake { aptos_framework: &signer, validator_1: &signer, validator_2: &signer, - ) acquires AllowedValidators, AptosCoinCapabilities, OwnerCapability, StakePool, ValidatorConfig, ValidatorPerformance, ValidatorSet, ValidatorFees { + ) acquires AllowedValidators, AptosCoinCapabilities, OwnerCapability, StakePool, ValidatorConfig, ValidatorPerformance, ValidatorSet { initialize_for_test_custom(aptos_framework, 50, 10000, LOCKUP_CYCLE_SECONDS, true, 1, 10, 100000); // Have one validator join the set to 
ensure the validator set is not empty when main validator joins. let (_sk_1, pk_1, pop_1) = generate_identity(); @@ -2011,7 +2017,7 @@ module aptos_framework::stake { public entry fun test_active_validator_cannot_add_stake_if_exceeding_max_allowed( aptos_framework: &signer, validator: &signer, - ) acquires AllowedValidators, AptosCoinCapabilities, OwnerCapability, StakePool, ValidatorConfig, ValidatorPerformance, ValidatorSet, ValidatorFees { + ) acquires AllowedValidators, AptosCoinCapabilities, OwnerCapability, StakePool, ValidatorConfig, ValidatorPerformance, ValidatorSet { initialize_for_test(aptos_framework); // Validator joins validator set and waits for epoch end so it's in the validator set. let (_sk, pk, pop) = generate_identity(); @@ -2026,7 +2032,7 @@ module aptos_framework::stake { public entry fun test_active_validator_with_pending_inactive_stake_cannot_add_stake_if_exceeding_max_allowed( aptos_framework: &signer, validator: &signer, - ) acquires AllowedValidators, AptosCoinCapabilities, OwnerCapability, StakePool, ValidatorConfig, ValidatorPerformance, ValidatorSet, ValidatorFees { + ) acquires AllowedValidators, AptosCoinCapabilities, OwnerCapability, StakePool, ValidatorConfig, ValidatorPerformance, ValidatorSet { initialize_for_test(aptos_framework); // Validator joins validator set and waits for epoch end so it's in the validator set. let (_sk, pk, pop) = generate_identity(); @@ -2046,7 +2052,7 @@ module aptos_framework::stake { aptos_framework: &signer, validator_1: &signer, validator_2: &signer, - ) acquires AllowedValidators, AptosCoinCapabilities, OwnerCapability, StakePool, ValidatorConfig, ValidatorPerformance, ValidatorSet, ValidatorFees { + ) acquires AllowedValidators, AptosCoinCapabilities, OwnerCapability, StakePool, ValidatorConfig, ValidatorPerformance, ValidatorSet { initialize_for_test(aptos_framework); let (_sk_1, pk_1, pop_1) = generate_identity(); let (_sk_2, pk_2, pop_2) = generate_identity(); @@ -2064,7 +2070,7 @@ module aptos_framework::stake { public entry fun test_end_to_end( aptos_framework: &signer, validator: &signer, - ) acquires AllowedValidators, OwnerCapability, StakePool, AptosCoinCapabilities, ValidatorConfig, ValidatorPerformance, ValidatorSet, ValidatorFees { + ) acquires AllowedValidators, OwnerCapability, StakePool, AptosCoinCapabilities, ValidatorConfig, ValidatorPerformance, ValidatorSet { initialize_for_test(aptos_framework); let (_sk, pk, pop) = generate_identity(); initialize_test_validator(&pk, &pop, validator, 100, true, true); @@ -2121,7 +2127,7 @@ module aptos_framework::stake { public entry fun test_inactive_validator_with_existing_lockup_join_validator_set( aptos_framework: &signer, validator: &signer, - ) acquires AllowedValidators, OwnerCapability, StakePool, AptosCoinCapabilities, ValidatorConfig, ValidatorPerformance, ValidatorSet, ValidatorFees { + ) acquires AllowedValidators, OwnerCapability, StakePool, AptosCoinCapabilities, ValidatorConfig, ValidatorPerformance, ValidatorSet { initialize_for_test(aptos_framework); let (_sk, pk, pop) = generate_identity(); initialize_test_validator(&pk, &pop, validator, 100, false, false); @@ -2147,7 +2153,7 @@ module aptos_framework::stake { public entry fun test_cannot_reduce_lockup( aptos_framework: &signer, validator: &signer, - ) acquires AllowedValidators, OwnerCapability, StakePool, AptosCoinCapabilities, ValidatorConfig, ValidatorPerformance, ValidatorSet, ValidatorFees { + ) acquires AllowedValidators, OwnerCapability, StakePool, AptosCoinCapabilities, ValidatorConfig, 
ValidatorPerformance, ValidatorSet { initialize_for_test(aptos_framework); let (_sk, pk, pop) = generate_identity(); initialize_test_validator(&pk, &pop, validator, 100, false, false); @@ -2166,7 +2172,7 @@ module aptos_framework::stake { aptos_framework: &signer, validator_1: &signer, validator_2: &signer, - ) acquires AllowedValidators, OwnerCapability, StakePool, AptosCoinCapabilities, ValidatorConfig, ValidatorPerformance, ValidatorSet, ValidatorFees { + ) acquires AllowedValidators, OwnerCapability, StakePool, AptosCoinCapabilities, ValidatorConfig, ValidatorPerformance, ValidatorSet { // Only 50% voting power increase is allowed in each epoch. initialize_for_test_custom(aptos_framework, 50, 10000, LOCKUP_CYCLE_SECONDS, true, 1, 10, 50); let (_sk_1, pk_1, pop_1) = generate_identity(); @@ -2188,7 +2194,7 @@ module aptos_framework::stake { aptos_framework: &signer, validator_1: &signer, validator_2: &signer, - ) acquires AllowedValidators, OwnerCapability, StakePool, AptosCoinCapabilities, ValidatorConfig, ValidatorPerformance, ValidatorSet, ValidatorFees { + ) acquires AllowedValidators, OwnerCapability, StakePool, AptosCoinCapabilities, ValidatorConfig, ValidatorPerformance, ValidatorSet { initialize_for_test_custom(aptos_framework, 50, 10000, LOCKUP_CYCLE_SECONDS, true, 1, 10, 10000); // Need 1 validator to be in the active validator set so joining limit works. let (_sk_1, pk_1, pop_1) = generate_identity(); @@ -2210,7 +2216,7 @@ module aptos_framework::stake { aptos_framework: &signer, validator_1: &signer, validator_2: &signer, - ) acquires AllowedValidators, OwnerCapability, StakePool, AptosCoinCapabilities, ValidatorConfig, ValidatorPerformance, ValidatorSet, ValidatorFees { + ) acquires AllowedValidators, OwnerCapability, StakePool, AptosCoinCapabilities, ValidatorConfig, ValidatorPerformance, ValidatorSet { // 100% voting power increase is allowed in each epoch. initialize_for_test_custom(aptos_framework, 50, 10000, LOCKUP_CYCLE_SECONDS, true, 1, 10, 100); // Need 1 validator to be in the active validator set so joining limit works. @@ -2230,7 +2236,7 @@ module aptos_framework::stake { public entry fun test_pending_active_validator_leaves_validator_set( aptos_framework: &signer, validator: &signer, - ) acquires AllowedValidators, OwnerCapability, StakePool, AptosCoinCapabilities, ValidatorConfig, ValidatorPerformance, ValidatorSet, ValidatorFees { + ) acquires AllowedValidators, OwnerCapability, StakePool, AptosCoinCapabilities, ValidatorConfig, ValidatorPerformance, ValidatorSet { initialize_for_test(aptos_framework); // Validator joins but epoch hasn't ended, so the validator is still pending_active. let (_sk, pk, pop) = generate_identity(); @@ -2254,7 +2260,7 @@ module aptos_framework::stake { public entry fun test_active_validator_cannot_add_more_stake_than_limit_in_multiple_epochs( aptos_framework: &signer, validator: &signer, - ) acquires AllowedValidators, OwnerCapability, StakePool, AptosCoinCapabilities, ValidatorConfig, ValidatorPerformance, ValidatorSet, ValidatorFees { + ) acquires AllowedValidators, OwnerCapability, StakePool, AptosCoinCapabilities, ValidatorConfig, ValidatorPerformance, ValidatorSet { // Only 50% voting power increase is allowed in each epoch. initialize_for_test_custom(aptos_framework, 50, 10000, LOCKUP_CYCLE_SECONDS, true, 1, 10, 50); // Add initial stake and join the validator set. 
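The hunks in this test section all make the same mechanical change, dropping ValidatorFees from each acquires list, matching the removal of the fee-distribution test helpers at the end of this file and of the fee-collection machinery in transaction_fee.move later in the patch. A minimal sketch of the resulting shape (the test name below is hypothetical; the helper calls are the ones used throughout this file):

// ValidatorFees no longer appears in the acquires list; the resource is removed by this change.
#[test(aptos_framework = @0x1, validator = @0x123)]
public entry fun test_stake_flow_without_validator_fees(
    aptos_framework: &signer,
    validator: &signer,
) acquires AllowedValidators, AptosCoinCapabilities, OwnerCapability, StakePool, ValidatorConfig, ValidatorPerformance, ValidatorSet {
    initialize_for_test(aptos_framework);
    let (_sk, pk, pop) = generate_identity();
    initialize_test_validator(&pk, &pop, validator, 100, true, true);
}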
@@ -2276,7 +2282,7 @@ module aptos_framework::stake { public entry fun test_active_validator_cannot_add_more_stake_than_limit( aptos_framework: &signer, validator: &signer, - ) acquires AllowedValidators, OwnerCapability, StakePool, AptosCoinCapabilities, ValidatorConfig, ValidatorPerformance, ValidatorSet, ValidatorFees { + ) acquires AllowedValidators, OwnerCapability, StakePool, AptosCoinCapabilities, ValidatorConfig, ValidatorPerformance, ValidatorSet { // Only 50% voting power increase is allowed in each epoch. initialize_for_test_custom(aptos_framework, 50, 10000, LOCKUP_CYCLE_SECONDS, true, 1, 10, 50); let (_sk, pk, pop) = generate_identity(); @@ -2290,7 +2296,7 @@ module aptos_framework::stake { public entry fun test_active_validator_unlock_partial_stake( aptos_framework: &signer, validator: &signer, - ) acquires AllowedValidators, OwnerCapability, StakePool, AptosCoinCapabilities, ValidatorConfig, ValidatorPerformance, ValidatorSet, ValidatorFees { + ) acquires AllowedValidators, OwnerCapability, StakePool, AptosCoinCapabilities, ValidatorConfig, ValidatorPerformance, ValidatorSet { // Reward rate = 10%. initialize_for_test_custom(aptos_framework, 50, 10000, LOCKUP_CYCLE_SECONDS, true, 1, 10, 100); let (_sk, pk, pop) = generate_identity(); @@ -2316,7 +2322,7 @@ module aptos_framework::stake { public entry fun test_active_validator_can_withdraw_all_stake_and_rewards_at_once( aptos_framework: &signer, validator: &signer, - ) acquires AllowedValidators, OwnerCapability, StakePool, AptosCoinCapabilities, ValidatorConfig, ValidatorPerformance, ValidatorSet, ValidatorFees { + ) acquires AllowedValidators, OwnerCapability, StakePool, AptosCoinCapabilities, ValidatorConfig, ValidatorPerformance, ValidatorSet { initialize_for_test(aptos_framework); let (_sk, pk, pop) = generate_identity(); initialize_test_validator(&pk, &pop, validator, 100, true, true); @@ -2353,7 +2359,7 @@ module aptos_framework::stake { public entry fun test_active_validator_unlocking_more_than_available_stake_should_cap( aptos_framework: &signer, validator: &signer, - ) acquires AllowedValidators, OwnerCapability, StakePool, AptosCoinCapabilities, ValidatorConfig, ValidatorPerformance, ValidatorSet, ValidatorFees { + ) acquires AllowedValidators, OwnerCapability, StakePool, AptosCoinCapabilities, ValidatorConfig, ValidatorPerformance, ValidatorSet { initialize_for_test(aptos_framework); let (_sk, pk, pop) = generate_identity(); initialize_test_validator(&pk, &pop, validator, 100, false, false); @@ -2367,7 +2373,7 @@ module aptos_framework::stake { public entry fun test_active_validator_withdraw_should_cap_by_inactive_stake( aptos_framework: &signer, validator: &signer, - ) acquires AllowedValidators, OwnerCapability, StakePool, AptosCoinCapabilities, ValidatorConfig, ValidatorPerformance, ValidatorSet, ValidatorFees { + ) acquires AllowedValidators, OwnerCapability, StakePool, AptosCoinCapabilities, ValidatorConfig, ValidatorPerformance, ValidatorSet { initialize_for_test(aptos_framework); // Initial balance = 900 (idle) + 100 (staked) = 1000. 
let (_sk, pk, pop) = generate_identity(); @@ -2392,7 +2398,7 @@ module aptos_framework::stake { public entry fun test_active_validator_can_reactivate_pending_inactive_stake( aptos_framework: &signer, validator: &signer, - ) acquires AllowedValidators, OwnerCapability, StakePool, AptosCoinCapabilities, ValidatorConfig, ValidatorPerformance, ValidatorSet, ValidatorFees { + ) acquires AllowedValidators, OwnerCapability, StakePool, AptosCoinCapabilities, ValidatorConfig, ValidatorPerformance, ValidatorSet { initialize_for_test(aptos_framework); let (_sk, pk, pop) = generate_identity(); initialize_test_validator(&pk, &pop, validator, 100, true, true); @@ -2411,7 +2417,7 @@ module aptos_framework::stake { public entry fun test_active_validator_reactivate_more_than_available_pending_inactive_stake_should_cap( aptos_framework: &signer, validator: &signer, - ) acquires AllowedValidators, OwnerCapability, StakePool, AptosCoinCapabilities, ValidatorConfig, ValidatorPerformance, ValidatorSet, ValidatorFees { + ) acquires AllowedValidators, OwnerCapability, StakePool, AptosCoinCapabilities, ValidatorConfig, ValidatorPerformance, ValidatorSet { initialize_for_test(aptos_framework); let (_sk, pk, pop) = generate_identity(); initialize_test_validator(&pk, &pop, validator, 100, true, true); @@ -2428,7 +2434,7 @@ module aptos_framework::stake { public entry fun test_active_validator_having_insufficient_remaining_stake_after_withdrawal_gets_kicked( aptos_framework: &signer, validator: &signer, - ) acquires AllowedValidators, OwnerCapability, StakePool, AptosCoinCapabilities, ValidatorConfig, ValidatorPerformance, ValidatorSet, ValidatorFees { + ) acquires AllowedValidators, OwnerCapability, StakePool, AptosCoinCapabilities, ValidatorConfig, ValidatorPerformance, ValidatorSet { initialize_for_test(aptos_framework); let (_sk, pk, pop) = generate_identity(); initialize_test_validator(&pk, &pop, validator, 100, true, true); @@ -2456,7 +2462,7 @@ module aptos_framework::stake { aptos_framework: &signer, validator: &signer, validator_2: &signer, - ) acquires AllowedValidators, OwnerCapability, StakePool, AptosCoinCapabilities, ValidatorConfig, ValidatorPerformance, ValidatorSet, ValidatorFees { + ) acquires AllowedValidators, OwnerCapability, StakePool, AptosCoinCapabilities, ValidatorConfig, ValidatorPerformance, ValidatorSet { initialize_for_test(aptos_framework); let (_sk_1, pk_1, pop_1) = generate_identity(); let (_sk_2, pk_2, pop_2) = generate_identity(); @@ -2500,7 +2506,7 @@ module aptos_framework::stake { aptos_framework: &signer, validator: &signer, validator_2: &signer, - ) acquires AllowedValidators, OwnerCapability, StakePool, AptosCoinCapabilities, ValidatorConfig, ValidatorPerformance, ValidatorSet, ValidatorFees { + ) acquires AllowedValidators, OwnerCapability, StakePool, AptosCoinCapabilities, ValidatorConfig, ValidatorPerformance, ValidatorSet { initialize_for_test(aptos_framework); let (_sk_1, pk_1, pop_1) = generate_identity(); let (_sk_2, pk_2, pop_2) = generate_identity(); @@ -2531,7 +2537,7 @@ module aptos_framework::stake { aptos_framework: &signer, validator_1: &signer, validator_2: &signer, - ) acquires AllowedValidators, OwnerCapability, StakePool, AptosCoinCapabilities, ValidatorConfig, ValidatorPerformance, ValidatorSet, ValidatorFees { + ) acquires AllowedValidators, OwnerCapability, StakePool, AptosCoinCapabilities, ValidatorConfig, ValidatorPerformance, ValidatorSet { // Only 50% voting power increase is allowed in each epoch. 
initialize_for_test_custom(aptos_framework, 50, 10000, LOCKUP_CYCLE_SECONDS, true, 1, 10, 50); let (_sk_1, pk_1, pop_1) = generate_identity(); @@ -2552,7 +2558,7 @@ module aptos_framework::stake { validator_1: &signer, validator_2: &signer, validator_3: &signer - ) acquires AllowedValidators, OwnerCapability, StakePool, AptosCoinCapabilities, ValidatorConfig, ValidatorPerformance, ValidatorSet, ValidatorFees { + ) acquires AllowedValidators, OwnerCapability, StakePool, AptosCoinCapabilities, ValidatorConfig, ValidatorPerformance, ValidatorSet { let validator_1_address = signer::address_of(validator_1); let validator_2_address = signer::address_of(validator_2); let validator_3_address = signer::address_of(validator_3); @@ -2646,7 +2652,7 @@ module aptos_framework::stake { public entry fun test_delegated_staking_with_owner_cap( aptos_framework: &signer, validator: &signer, - ) acquires AllowedValidators, OwnerCapability, StakePool, AptosCoinCapabilities, ValidatorConfig, ValidatorPerformance, ValidatorSet, ValidatorFees { + ) acquires AllowedValidators, OwnerCapability, StakePool, AptosCoinCapabilities, ValidatorConfig, ValidatorPerformance, ValidatorSet { initialize_for_test_custom(aptos_framework, 100, 10000, LOCKUP_CYCLE_SECONDS, true, 1, 100, 100); let (_sk, pk, pop) = generate_identity(); initialize_test_validator(&pk, &pop, validator, 0, false, false); @@ -2699,7 +2705,7 @@ module aptos_framework::stake { public entry fun test_validator_cannot_join_post_genesis( aptos_framework: &signer, validator: &signer, - ) acquires AllowedValidators, AptosCoinCapabilities, OwnerCapability, StakePool, ValidatorConfig, ValidatorPerformance, ValidatorSet, ValidatorFees { + ) acquires AllowedValidators, AptosCoinCapabilities, OwnerCapability, StakePool, ValidatorConfig, ValidatorPerformance, ValidatorSet { initialize_for_test_custom(aptos_framework, 100, 10000, LOCKUP_CYCLE_SECONDS, false, 1, 100, 100); // Joining the validator set should fail as post genesis validator set change is not allowed. 
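Several of the tests touched above and below are negative cases, for example joining the validator set post-genesis when set changes are disallowed; the failure expectation lives in attributes outside the hunk context shown here. As a rough, hypothetical sketch of how such a case reads as a Move unit test (the name is invented and no concrete abort code is claimed, since neither appears in this patch):

#[test(aptos_framework = @0x1, validator = @0x123)]
#[expected_failure] // a concrete abort code would normally be pinned here; omitted in this sketch
public entry fun test_join_rejected_after_genesis(
    aptos_framework: &signer,
    validator: &signer,
) acquires AllowedValidators, AptosCoinCapabilities, OwnerCapability, StakePool, ValidatorConfig, ValidatorPerformance, ValidatorSet {
    // The `false` argument disables validator-set changes after genesis in this custom setup.
    initialize_for_test_custom(aptos_framework, 100, 10000, LOCKUP_CYCLE_SECONDS, false, 1, 100, 100);
    let (_sk, pk, pop) = generate_identity();
    // Joining the set should therefore abort, which satisfies expected_failure.
    initialize_test_validator(&pk, &pop, validator, 100, true, true);
}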
@@ -2712,7 +2718,7 @@ module aptos_framework::stake { public entry fun test_invalid_pool_address( aptos_framework: &signer, validator: &signer, - ) acquires AllowedValidators, AptosCoinCapabilities, OwnerCapability, StakePool, ValidatorConfig, ValidatorPerformance, ValidatorSet, ValidatorFees { + ) acquires AllowedValidators, AptosCoinCapabilities, OwnerCapability, StakePool, ValidatorConfig, ValidatorPerformance, ValidatorSet { initialize_for_test(aptos_framework); let (_sk, pk, pop) = generate_identity(); initialize_test_validator(&pk, &pop, validator, 100, true, true); @@ -2724,7 +2730,7 @@ module aptos_framework::stake { public entry fun test_validator_cannot_leave_post_genesis( aptos_framework: &signer, validator: &signer, - ) acquires AllowedValidators, OwnerCapability, StakePool, AptosCoinCapabilities, ValidatorConfig, ValidatorPerformance, ValidatorSet, ValidatorFees { + ) acquires AllowedValidators, OwnerCapability, StakePool, AptosCoinCapabilities, ValidatorConfig, ValidatorPerformance, ValidatorSet { initialize_for_test_custom(aptos_framework, 100, 10000, LOCKUP_CYCLE_SECONDS, false, 1, 100, 100); let (_sk, pk, pop) = generate_identity(); initialize_test_validator(&pk, &pop, validator, 100, false, false); @@ -2753,7 +2759,7 @@ module aptos_framework::stake { validator_3: &signer, validator_4: &signer, validator_5: &signer, - ) acquires AllowedValidators, AptosCoinCapabilities, OwnerCapability, StakePool, ValidatorConfig, ValidatorPerformance, ValidatorSet, ValidatorFees { + ) acquires AllowedValidators, AptosCoinCapabilities, OwnerCapability, StakePool, ValidatorConfig, ValidatorPerformance, ValidatorSet { let v1_addr = signer::address_of(validator_1); let v2_addr = signer::address_of(validator_2); let v3_addr = signer::address_of(validator_3); @@ -2826,7 +2832,7 @@ module aptos_framework::stake { validator_3: &signer, validator_4: &signer, validator_5: &signer, - ) acquires AllowedValidators, OwnerCapability, StakePool, AptosCoinCapabilities, ValidatorConfig, ValidatorPerformance, ValidatorSet, ValidatorFees { + ) acquires AllowedValidators, OwnerCapability, StakePool, AptosCoinCapabilities, ValidatorConfig, ValidatorPerformance, ValidatorSet { let v1_addr = signer::address_of(validator_1); let v2_addr = signer::address_of(validator_2); let v3_addr = signer::address_of(validator_3); @@ -2889,7 +2895,7 @@ module aptos_framework::stake { aptos_framework: &signer, validator_1: &signer, validator_2: &signer, - ) acquires AllowedValidators, OwnerCapability, StakePool, AptosCoinCapabilities, ValidatorConfig, ValidatorPerformance, ValidatorSet, ValidatorFees { + ) acquires AllowedValidators, OwnerCapability, StakePool, AptosCoinCapabilities, ValidatorConfig, ValidatorPerformance, ValidatorSet { initialize_for_test(aptos_framework); let validator_1_address = signer::address_of(validator_1); @@ -2937,7 +2943,7 @@ module aptos_framework::stake { aptos_framework: &signer, validator_1: &signer, validator_2: &signer, - ) acquires AllowedValidators, OwnerCapability, StakePool, AptosCoinCapabilities, ValidatorConfig, ValidatorPerformance, ValidatorSet, ValidatorFees { + ) acquires AllowedValidators, OwnerCapability, StakePool, AptosCoinCapabilities, ValidatorConfig, ValidatorPerformance, ValidatorSet { initialize_for_test(aptos_framework); let genesis_time_in_secs = timestamp::now_seconds(); @@ -2993,7 +2999,7 @@ module aptos_framework::stake { public entry fun test_update_performance_statistics_should_not_fail_due_to_out_of_bounds( aptos_framework: &signer, validator: &signer, - ) acquires 
AllowedValidators, OwnerCapability, StakePool, AptosCoinCapabilities, ValidatorConfig, ValidatorPerformance, ValidatorSet, ValidatorFees { + ) acquires AllowedValidators, OwnerCapability, StakePool, AptosCoinCapabilities, ValidatorConfig, ValidatorPerformance, ValidatorSet { initialize_for_test(aptos_framework); let validator_address = signer::address_of(validator); @@ -3116,7 +3122,7 @@ module aptos_framework::stake { aptos_framework: &signer, validator_1: &signer, validator_2: &signer, - ) acquires AllowedValidators, OwnerCapability, StakePool, AptosCoinCapabilities, ValidatorConfig, ValidatorPerformance, ValidatorSet, ValidatorFees { + ) acquires AllowedValidators, OwnerCapability, StakePool, AptosCoinCapabilities, ValidatorConfig, ValidatorPerformance, ValidatorSet { initialize_for_test(aptos_framework); let (_sk_1, pk_1, pop_1) = generate_identity(); let (_sk_2, pk_2, pop_2) = generate_identity(); @@ -3133,7 +3139,7 @@ module aptos_framework::stake { #[test_only] public fun end_epoch( - ) acquires StakePool, AptosCoinCapabilities, ValidatorConfig, ValidatorPerformance, ValidatorSet, ValidatorFees { + ) acquires StakePool, AptosCoinCapabilities, ValidatorConfig, ValidatorPerformance, ValidatorSet { // Set the number of blocks to 1, to give out rewards to non-failing validators. set_validator_perf_at_least_one_block(); timestamp::fast_forward_seconds(EPOCH_DURATION); @@ -3213,74 +3219,4 @@ module aptos_framework::stake { let (numerator, denominator) = staking_config::get_reward_rate(&staking_config::get()); amount + amount * numerator / denominator } - - #[test_only] - public fun get_validator_fee(validator_addr: address): u64 acquires ValidatorFees { - let fees_table = &borrow_global(@aptos_framework).fees_table; - let coin = table::borrow(fees_table, validator_addr); - coin::value(coin) - } - - #[test_only] - public fun assert_no_fees_for_validator(validator_addr: address) acquires ValidatorFees { - let fees_table = &borrow_global(@aptos_framework).fees_table; - assert!(!table::contains(fees_table, validator_addr), 0); - } - - #[test_only] - const COLLECT_AND_DISTRIBUTE_GAS_FEES: u64 = 6; - - #[test(aptos_framework = @0x1, validator_1 = @0x123, validator_2 = @0x234, validator_3 = @0x345)] - fun test_distribute_validator_fees( - aptos_framework: &signer, - validator_1: &signer, - validator_2: &signer, - validator_3: &signer, - ) acquires AllowedValidators, AptosCoinCapabilities, OwnerCapability, StakePool, ValidatorConfig, ValidatorPerformance, ValidatorSet, ValidatorFees { - // Make sure that fees collection and distribution is enabled. - features::change_feature_flags_for_testing(aptos_framework, vector[COLLECT_AND_DISTRIBUTE_GAS_FEES], vector[]); - assert!(features::collect_and_distribute_gas_fees(), 0); - - // Initialize staking and validator fees table. - initialize_for_test(aptos_framework); - initialize_validator_fees(aptos_framework); - - let validator_1_address = signer::address_of(validator_1); - let validator_2_address = signer::address_of(validator_2); - let validator_3_address = signer::address_of(validator_3); - - // Validators join the set and epoch ends. 
- let (_sk_1, pk_1, pop_1) = generate_identity(); - let (_sk_2, pk_2, pop_2) = generate_identity(); - let (_sk_3, pk_3, pop_3) = generate_identity(); - initialize_test_validator(&pk_1, &pop_1, validator_1, 100, true, false); - initialize_test_validator(&pk_2, &pop_2, validator_2, 100, true, false); - initialize_test_validator(&pk_3, &pop_3, validator_3, 100, true, true); - - // Next, simulate fees collection during three blocks, where proposers are - // validators 1, 2, and 1 again. - add_transaction_fee(validator_1_address, mint_coins(100)); - add_transaction_fee(validator_2_address, mint_coins(500)); - add_transaction_fee(validator_1_address, mint_coins(200)); - - // Fess have to be assigned to the right validators, but not - // distributed yet. - assert!(get_validator_fee(validator_1_address) == 300, 0); - assert!(get_validator_fee(validator_2_address) == 500, 0); - assert_no_fees_for_validator(validator_3_address); - assert_validator_state(validator_1_address, 100, 0, 0, 0, 2); - assert_validator_state(validator_2_address, 100, 0, 0, 0, 1); - assert_validator_state(validator_3_address, 100, 0, 0, 0, 0); - - end_epoch(); - - // Epoch ended. Validators must have recieved their rewards and, most importantly, - // their fees. - assert_no_fees_for_validator(validator_1_address); - assert_no_fees_for_validator(validator_2_address); - assert_no_fees_for_validator(validator_3_address); - assert_validator_state(validator_1_address, 401, 0, 0, 0, 2); - assert_validator_state(validator_2_address, 601, 0, 0, 0, 1); - assert_validator_state(validator_3_address, 101, 0, 0, 0, 0); - } } diff --git a/aptos-move/framework/aptos-framework/sources/stake.spec.move b/aptos-move/framework/aptos-framework/sources/stake.spec.move index 44101fbcb134c..29eea04f434ce 100644 --- a/aptos-move/framework/aptos-framework/sources/stake.spec.move +++ b/aptos-move/framework/aptos-framework/sources/stake.spec.move @@ -42,6 +42,7 @@ spec aptos_framework::stake { // ----------------- spec module { pragma verify = true; + pragma aborts_if_is_partial; // The validator set should satisfy its desired invariant. invariant [suspendable] exists(@aptos_framework) ==> validator_set_is_valid(); // After genesis, `AptosCoinCapabilities`, `ValidatorPerformance` and `ValidatorSet` exist. 
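The module-level spec of stake.spec.move now adds pragma aborts_if_is_partial. For the Move Prover this means the listed aborts_if conditions remain checked as conditions under which the function must abort, but they no longer have to cover every possible abort; that is why the fee-related abort conditions can simply be deleted in the hunks below rather than replaced. A minimal illustration of the semantics, using a hypothetical function that is not part of this patch:

spec divide_by(x: u64, y: u64): u64 {
    pragma aborts_if_is_partial;
    // Checked: the function must abort whenever y == 0 ...
    aborts_if y == 0;
    // ... and, because the spec is partial, it may also abort in cases that
    // are not listed here without failing verification.
}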
@@ -118,13 +119,6 @@ spec aptos_framework::stake { // Function specifications // ----------------------- - spec initialize_validator_fees(aptos_framework: &signer) { - let aptos_addr = signer::address_of(aptos_framework); - aborts_if !system_addresses::is_aptos_framework_address(aptos_addr); - aborts_if exists(aptos_addr); - ensures exists(aptos_addr); - } - spec initialize_validator( account: &signer, consensus_pubkey: vector, @@ -132,6 +126,11 @@ spec aptos_framework::stake { network_addresses: vector, fullnode_addresses: vector, ){ + pragma verify = false; + + include AbortsIfSignerPermissionStake { + s: account + }; let pubkey_from_pop = bls12381::spec_public_key_from_bytes_with_pop( consensus_pubkey, proof_of_possession_from_bytes(proof_of_possession) @@ -175,8 +174,11 @@ spec aptos_framework::stake { ) { // This function casue timeout (property proved) - // pragma verify_duration_estimate = 120; + pragma verify_duration_estimate = 60; pragma disable_invariants_in_body; + include AbortsIfSignerPermissionStake { + s: operator + }; aborts_if !staking_config::get_allow_validator_set_change(staking_config::get()); aborts_if !exists(pool_address); aborts_if !exists(pool_address); @@ -230,6 +232,9 @@ spec aptos_framework::stake { { // TODO(fa_migration) pragma verify = false; + include AbortsIfSignerPermissionStake { + s: owner + }; aborts_if reconfiguration_state::spec_is_in_progress(); let addr = signer::address_of(owner); let ownership_cap = global(addr); @@ -269,6 +274,9 @@ spec aptos_framework::stake { ) { pragma disable_invariants_in_body; requires chain_status::is_operating(); + include AbortsIfSignerPermissionStake { + s: operator + }; aborts_if reconfiguration_state::spec_is_in_progress(); let config = staking_config::get(); aborts_if !staking_config::get_allow_validator_set_change(config); @@ -304,12 +312,18 @@ spec aptos_framework::stake { spec extract_owner_cap(owner: &signer): OwnerCapability { // TODO: set because of timeout (property proved) pragma verify_duration_estimate = 300; + include AbortsIfSignerPermissionStake { + s: owner + }; let owner_address = signer::address_of(owner); aborts_if !exists(owner_address); ensures !exists(owner_address); } spec deposit_owner_cap(owner: &signer, owner_cap: OwnerCapability) { + include AbortsIfSignerPermissionStake { + s: owner + }; let owner_address = signer::address_of(owner); aborts_if exists(owner_address); ensures exists(owner_address); @@ -358,6 +372,9 @@ spec aptos_framework::stake { new_network_addresses: vector, new_fullnode_addresses: vector, ) { + include AbortsIfSignerPermissionStake { + s: operator + }; let pre_stake_pool = global(pool_address); let post validator_info = global(pool_address); modifies global(pool_address); @@ -404,6 +421,9 @@ spec aptos_framework::stake { new_consensus_pubkey: vector, proof_of_possession: vector, ) { + include AbortsIfSignerPermissionStake { + s: operator + }; let pre_stake_pool = global(pool_address); let post validator_info = global(pool_address); aborts_if reconfiguration_state::spec_is_in_progress(); @@ -496,17 +516,10 @@ spec aptos_framework::stake { let post post_stake_pool = global(pool_address); let post post_active_value = post_stake_pool.active.value; let post post_pending_inactive_value = post_stake_pool.pending_inactive.value; - let fees_table = global(@aptos_framework).fees_table; - let post post_fees_table = global(@aptos_framework).fees_table; let post post_inactive_value = post_stake_pool.inactive.value; ensures post_stake_pool.pending_active.value == 0; // the amount 
stored in the stake pool should not changed after the update - ensures if (features::spec_is_enabled(features::COLLECT_AND_DISTRIBUTE_GAS_FEES) && table::spec_contains(fees_table, pool_address)) { - !table::spec_contains(post_fees_table, pool_address) && - post_active_value == stake_pool.active.value + rewards_amount_1 + stake_pool.pending_active.value + table::spec_get(fees_table, pool_address).value - } else { - post_active_value == stake_pool.active.value + rewards_amount_1 + stake_pool.pending_active.value - }; + ensures post_active_value == stake_pool.active.value + rewards_amount_1 + stake_pool.pending_active.value; // when current lockup cycle has expired, pending inactive should be fully unlocked and moved into inactive ensures if (spec_get_reconfig_start_time_secs() >= stake_pool.locked_until_secs) { post_pending_inactive_value == 0 && @@ -516,6 +529,13 @@ spec aptos_framework::stake { }; } + spec schema AbortsIfSignerPermissionStake { + use aptos_framework::permissioned_signer; + s: signer; + let perm = StakeManagementPermission {}; + aborts_if !permissioned_signer::spec_check_permission_exists(s, perm); + } + spec schema UpdateStakePoolAbortsIf { use aptos_std::type_info; @@ -527,7 +547,6 @@ spec aptos_framework::stake { aborts_if global(pool_address).validator_index >= len(validator_perf.validators); let aptos_addr = type_info::type_of().account_address; - aborts_if !exists(aptos_addr); let stake_pool = global(pool_address); @@ -536,6 +555,7 @@ spec aptos_framework::stake { } spec distribute_rewards { + pragma aborts_if_is_partial; include ResourceRequirement; requires rewards_rate <= MAX_REWARDS_RATE; requires rewards_rate_denominator > 0; @@ -601,10 +621,15 @@ spec aptos_framework::stake { } } + spec fun spec_get_lockup_secs(pool_address: address): u64 { + global(pool_address).locked_until_secs + } + spec calculate_rewards_amount { pragma opaque; // TODO: set because of timeout (property proved) pragma verify_duration_estimate = 300; + pragma verify = false; requires rewards_rate <= MAX_REWARDS_RATE; requires rewards_rate_denominator > 0; requires rewards_rate <= rewards_rate_denominator; @@ -682,7 +707,7 @@ spec aptos_framework::stake { spec add_stake_with_cap { pragma disable_invariants_in_body; - pragma verify_duration_estimate = 300; + pragma verify = false; include ResourceRequirement; let amount = coins.value; aborts_if reconfiguration_state::spec_is_in_progress(); @@ -690,10 +715,13 @@ spec aptos_framework::stake { } spec add_stake { - // TODO: These function passed locally however failed in github CI - pragma verify_duration_estimate = 120; + // TODO: fix + pragma verify = false; // TODO(fa_migration) pragma aborts_if_is_partial; + include AbortsIfSignerPermissionStake { + s: owner + }; aborts_if reconfiguration_state::spec_is_in_progress(); include ResourceRequirement; include AddStakeAbortsIfAndEnsures; @@ -707,7 +735,11 @@ spec aptos_framework::stake { ) { // TODO: These function failed in github CI pragma verify_duration_estimate = 120; - + pragma verify = false; + pragma aborts_if_is_partial; + include AbortsIfSignerPermissionStake { + s: owner + }; include ResourceRequirement; let addr = signer::address_of(owner); ensures global(addr) == ValidatorConfig { @@ -726,20 +758,6 @@ spec aptos_framework::stake { active == initial_stake_amount; } - spec add_transaction_fee(validator_addr: address, fee: Coin) { - aborts_if !exists(@aptos_framework); - let fees_table = global(@aptos_framework).fees_table; - let post post_fees_table = global(@aptos_framework).fees_table; - 
let collected_fee = table::spec_get(fees_table, validator_addr); - let post post_collected_fee = table::spec_get(post_fees_table, validator_addr); - ensures if (table::spec_contains(fees_table, validator_addr)) { - post_collected_fee.value == collected_fee.value + fee.value - } else { - table::spec_contains(post_fees_table, validator_addr) && - table::spec_get(post_fees_table, validator_addr) == fee - }; - } - spec update_voting_power_increase(increase_amount: u64) { requires !reconfiguration_state::spec_is_in_progress(); aborts_if !exists(@aptos_framework); @@ -920,7 +938,6 @@ spec aptos_framework::stake { requires exists(@aptos_framework); requires exists(@aptos_framework) || !features::spec_periodical_reward_rate_decrease_enabled(); requires exists(@aptos_framework); - requires exists(@aptos_framework); } // Adding helper function in staking_config leads to an unexpected error diff --git a/aptos-move/framework/aptos-framework/sources/staking_contract.move b/aptos-move/framework/aptos-framework/sources/staking_contract.move index 8de013987e7b9..5e9ba4355b995 100644 --- a/aptos-move/framework/aptos-framework/sources/staking_contract.move +++ b/aptos-move/framework/aptos-framework/sources/staking_contract.move @@ -60,7 +60,7 @@ module aptos_framework::staking_contract { const EINSUFFICIENT_ACTIVE_STAKE_TO_WITHDRAW: u64 = 7; /// Caller must be either the staker, operator, or beneficiary. const ENOT_STAKER_OR_OPERATOR_OR_BENEFICIARY: u64 = 8; - /// Chaning beneficiaries for operators is not supported. + /// Changing beneficiaries for operators is not supported. const EOPERATOR_BENEFICIARY_CHANGE_NOT_SUPPORTED: u64 = 9; /// Maximum number of distributions a stake pool can support. @@ -418,11 +418,12 @@ module aptos_framework::staking_contract { if (std::features::module_event_migration_enabled()) { emit(CreateStakingContract { operator, voter, pool_address, principal, commission_percentage }); + } else { + emit_event( + &mut store.create_staking_contract_events, + CreateStakingContractEvent { operator, voter, pool_address, principal, commission_percentage }, + ); }; - emit_event( - &mut store.create_staking_contract_events, - CreateStakingContractEvent { operator, voter, pool_address, principal, commission_percentage }, - ); pool_address } @@ -442,11 +443,12 @@ module aptos_framework::staking_contract { let pool_address = staking_contract.pool_address; if (std::features::module_event_migration_enabled()) { emit(AddStake { operator, pool_address, amount }); + } else { + emit_event( + &mut store.add_stake_events, + AddStakeEvent { operator, pool_address, amount }, + ); }; - emit_event( - &mut store.add_stake_events, - AddStakeEvent { operator, pool_address, amount }, - ); } /// Convenient function to allow the staker to update the voter address in a staking contract they made. @@ -462,12 +464,12 @@ module aptos_framework::staking_contract { if (std::features::module_event_migration_enabled()) { emit(UpdateVoter { operator, pool_address, old_voter, new_voter }); + } else { + emit_event( + &mut store.update_voter_events, + UpdateVoterEvent { operator, pool_address, old_voter, new_voter }, + ); }; - emit_event( - &mut store.update_voter_events, - UpdateVoterEvent { operator, pool_address, old_voter, new_voter }, - ); - } /// Convenient function to allow the staker to reset their stake pool's lockup period to start now. 
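Every event-emitting entry point in staking_contract gets the same restructuring: the legacy emit_event call moves into an else branch, so each action now produces exactly one event, a module event when the migration feature is enabled or a handle-based event otherwise, where the old code emitted both on the migrated path. The shape of the change, restated from the update_voter hunk above:

if (std::features::module_event_migration_enabled()) {
    // New-style module event.
    emit(UpdateVoter { operator, pool_address, old_voter, new_voter });
} else {
    // Legacy handle-based event, now emitted only on the pre-migration path.
    emit_event(
        &mut store.update_voter_events,
        UpdateVoterEvent { operator, pool_address, old_voter, new_voter },
    );
};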
@@ -482,8 +484,9 @@ module aptos_framework::staking_contract { if (std::features::module_event_migration_enabled()) { emit(ResetLockup { operator, pool_address }); + } else { + emit_event(&mut store.reset_lockup_events, ResetLockupEvent { operator, pool_address }); }; - emit_event(&mut store.reset_lockup_events, ResetLockupEvent { operator, pool_address }); } /// Convenience function to allow a staker to update the commission percentage paid to the operator. @@ -526,11 +529,12 @@ module aptos_framework::staking_contract { emit( UpdateCommission { staker: staker_address, operator, old_commission_percentage, new_commission_percentage } ); + } else { + emit_event( + &mut borrow_global_mut(staker_address).update_commission_events, + UpdateCommissionEvent { staker: staker_address, operator, old_commission_percentage, new_commission_percentage } + ); }; - emit_event( - &mut borrow_global_mut(staker_address).update_commission_events, - UpdateCommissionEvent { staker: staker_address, operator, old_commission_percentage, new_commission_percentage } - ); } /// Unlock commission amount from the stake pool. Operator needs to wait for the amount to become withdrawable @@ -593,11 +597,12 @@ module aptos_framework::staking_contract { let pool_address = staking_contract.pool_address; if (std::features::module_event_migration_enabled()) { emit(RequestCommission { operator, pool_address, accumulated_rewards, commission_amount }); + } else { + emit_event( + request_commission_events, + RequestCommissionEvent { operator, pool_address, accumulated_rewards, commission_amount }, + ); }; - emit_event( - request_commission_events, - RequestCommissionEvent { operator, pool_address, accumulated_rewards, commission_amount }, - ); commission_amount } @@ -648,11 +653,12 @@ module aptos_framework::staking_contract { let pool_address = staking_contract.pool_address; if (std::features::module_event_migration_enabled()) { emit(UnlockStake { pool_address, operator, amount, commission_paid }); + } else { + emit_event( + &mut store.unlock_stake_events, + UnlockStakeEvent { pool_address, operator, amount, commission_paid }, + ); }; - emit_event( - &mut store.unlock_stake_events, - UnlockStakeEvent { pool_address, operator, amount, commission_paid }, - ); } /// Unlock all accumulated rewards since the last recorded principals. @@ -718,11 +724,12 @@ module aptos_framework::staking_contract { simple_map::add(staking_contracts, new_operator, staking_contract); if (std::features::module_event_migration_enabled()) { emit(SwitchOperator { pool_address, old_operator, new_operator }); + } else { + emit_event( + &mut store.switch_operator_events, + SwitchOperatorEvent { pool_address, old_operator, new_operator } + ); }; - emit_event( - &mut store.switch_operator_events, - SwitchOperatorEvent { pool_address, old_operator, new_operator } - ); } /// Allows an operator to change its beneficiary. Any existing unpaid commission rewards will be paid to the new @@ -785,7 +792,7 @@ module aptos_framework::staking_contract { // Buy all recipients out of the distribution pool. while (pool_u64::shareholders_count(distribution_pool) > 0) { let recipients = pool_u64::shareholders(distribution_pool); - let recipient = *vector::borrow(&mut recipients, 0); + let recipient = *vector::borrow(&recipients, 0); let current_shares = pool_u64::shares(distribution_pool, recipient); let amount_to_distribute = pool_u64::redeem_shares(distribution_pool, recipient, current_shares); // If the recipient is the operator, send the commission to the beneficiary instead. 
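Two small borrow cleanups ride along with the event restructuring: the distribution loop above now reads the shareholder list through an immutable reference, and assert_staking_contract_exists in a later hunk drops borrow_global_mut in favor of borrow_global, since both paths only read. A short sketch of the read-only style, assembled from those two hunks:

// Only reading the shareholder list, so an immutable borrow is enough.
let recipients = pool_u64::shareholders(distribution_pool);
let recipient = *vector::borrow(&recipients, 0);

// Likewise, an existence check needs no mutable access to global storage.
let staking_contracts = &borrow_global<Store>(staker).staking_contracts;
assert!(
    simple_map::contains_key(staking_contracts, &operator),
    error::not_found(ENO_STAKING_CONTRACT_FOUND_FOR_OPERATOR),
);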
@@ -796,11 +803,12 @@ module aptos_framework::staking_contract { if (std::features::module_event_migration_enabled()) { emit(Distribute { operator, pool_address, recipient, amount: amount_to_distribute }); + } else { + emit_event( + distribute_events, + DistributeEvent { operator, pool_address, recipient, amount: amount_to_distribute } + ); }; - emit_event( - distribute_events, - DistributeEvent { operator, pool_address, recipient, amount: amount_to_distribute } - ); }; // In case there's any dust left, send them all to the staker. @@ -815,7 +823,7 @@ module aptos_framework::staking_contract { /// Assert that a staking_contract exists for the staker/operator pair. fun assert_staking_contract_exists(staker: address, operator: address) acquires Store { assert!(exists(staker), error::not_found(ENO_STAKING_CONTRACT_FOUND_FOR_STAKER)); - let staking_contracts = &mut borrow_global_mut(staker).staking_contracts; + let staking_contracts = &borrow_global(staker).staking_contracts; assert!( simple_map::contains_key(staking_contracts, &operator), error::not_found(ENO_STAKING_CONTRACT_FOUND_FOR_OPERATOR), @@ -839,11 +847,12 @@ module aptos_framework::staking_contract { let pool_address = staking_contract.pool_address; if (std::features::module_event_migration_enabled()) { emit(AddDistribution { operator, pool_address, amount: coins_amount }); + } else { + emit_event( + add_distribution_events, + AddDistributionEvent { operator, pool_address, amount: coins_amount } + ); }; - emit_event( - add_distribution_events, - AddDistributionEvent { operator, pool_address, amount: coins_amount } - ); } /// Calculate accumulated rewards and commissions since last update. @@ -961,12 +970,6 @@ module aptos_framework::staking_contract { #[test_only] const MAXIMUM_STAKE: u64 = 100000000000000000; // 1B APT coins with 8 decimals. - #[test_only] - const MODULE_EVENT: u64 = 26; - - #[test_only] - const OPERATOR_BENEFICIARY_CHANGE: u64 = 39; - #[test_only] public fun setup(aptos_framework: &signer, staker: &signer, operator: &signer, initial_balance: u64) { // Reward rate of 0.1% per epoch. @@ -1006,7 +1009,8 @@ module aptos_framework::staking_contract { // Voter is initially set to operator but then updated to be staker. create_staking_contract(staker, operator_address, operator_address, amount, commission, vector::empty()); - std::features::change_feature_flags_for_testing(aptos_framework, vector[MODULE_EVENT, OPERATOR_BENEFICIARY_CHANGE], vector[]); + // In the test environment, the periodical_reward_rate_decrease feature is initially turned off. 
+ std::features::change_feature_flags_for_testing(aptos_framework, vector[], vector[features::get_periodical_reward_rate_decrease_feature()]); } #[test(aptos_framework = @0x1, staker = @0x123, operator = @0x234)] diff --git a/aptos-move/framework/aptos-framework/sources/staking_contract.spec.move b/aptos-move/framework/aptos-framework/sources/staking_contract.spec.move index fa6f54b391ba6..9966bc97ad711 100644 --- a/aptos-move/framework/aptos-framework/sources/staking_contract.spec.move +++ b/aptos-move/framework/aptos-framework/sources/staking_contract.spec.move @@ -72,6 +72,10 @@ spec aptos_framework::staking_contract { pragma aborts_if_is_strict; } + spec StakingContract { + invariant commission_percentage >= 0 && commission_percentage <= 100; + } + spec stake_pool_address(staker: address, operator: address): address { include ContractExistsAbortsIf; let staking_contracts = global(staker).staking_contracts; @@ -97,7 +101,6 @@ spec aptos_framework::staking_contract { spec staking_contract_amounts(staker: address, operator: address): (u64, u64, u64) { // TODO: set because of timeout (property proved). pragma verify_duration_estimate = 120; - requires staking_contract.commission_percentage >= 0 && staking_contract.commission_percentage <= 100; let staking_contracts = global(staker).staking_contracts; let staking_contract = simple_map::spec_get(staking_contracts, operator); @@ -132,6 +135,10 @@ spec aptos_framework::staking_contract { ensures result == spec_staking_contract_exists(staker, operator); } + spec get_expected_stake_pool_address { + pragma aborts_if_is_partial; + } + spec fun spec_staking_contract_exists(staker: address, operator: address): bool { if (!exists(staker)) { false @@ -228,6 +235,7 @@ spec aptos_framework::staking_contract { let post new_delegated_voter = global(pool_address).delegated_voter; // property 4: The staker may update the voter of a staking contract, enabling them // to modify the assigned voter address and ensure it accurately reflects their desired choice. + /// [high-level-req-4] ensures new_delegated_voter == new_voter; } @@ -275,8 +283,6 @@ spec aptos_framework::staking_contract { // TODO: Call `update_distribution_pool` and could not verify `update_distribution_pool`. // TODO: Set because of timeout (estimate unknown). pragma verify = false; - /// [high-level-req-4] - requires staking_contract.commission_percentage >= 0 && staking_contract.commission_percentage <= 100; let staker_address = signer::address_of(staker); let staking_contracts = global(staker_address).staking_contracts; let staking_contract = simple_map::spec_get(staking_contracts, operator); @@ -287,7 +293,6 @@ spec aptos_framework::staking_contract { // TODO: Call `update_distribution_pool` and could not verify `update_distribution_pool`. // TODO: Set because of timeout (estimate unknown). pragma verify = false; - requires amount > 0; let staker_address = signer::address_of(staker); include ContractExistsAbortsIf { staker: staker_address }; } @@ -441,8 +446,9 @@ spec aptos_framework::staking_contract { } /// The Account exists under the staker. - /// The guid_creation_num of the ccount resource is up to MAX_U64. + /// The guid_creation_num of the account resource is up to MAX_U64. 
spec new_staking_contracts_holder(staker: &signer): Store { + pragma aborts_if_is_partial; include NewStakingContractsHolderAbortsIf; } @@ -450,10 +456,10 @@ spec aptos_framework::staking_contract { staker: signer; let addr = signer::address_of(staker); - let account = global(addr); - aborts_if !exists(addr); - aborts_if account.guid_creation_num + 9 >= account::MAX_GUID_CREATION_NUM; - aborts_if account.guid_creation_num + 9 > MAX_U64; + // let account = global(addr); + // aborts_if !exists(addr); + // aborts_if account.guid_creation_num + 9 >= account::MAX_GUID_CREATION_NUM; + // aborts_if account.guid_creation_num + 9 > MAX_U64; } /// The Store exists under the staker. @@ -588,7 +594,6 @@ spec aptos_framework::staking_contract { requires exists( @aptos_framework ) || !std::features::spec_periodical_reward_rate_decrease_enabled(); - requires exists(@aptos_framework); requires exists(@aptos_framework); requires exists(@aptos_framework); } diff --git a/aptos-move/framework/aptos-framework/sources/staking_proxy.move b/aptos-move/framework/aptos-framework/sources/staking_proxy.move index 26d1aa33372ce..76ffbd2182a63 100644 --- a/aptos-move/framework/aptos-framework/sources/staking_proxy.move +++ b/aptos-move/framework/aptos-framework/sources/staking_proxy.move @@ -1,11 +1,31 @@ module aptos_framework::staking_proxy { + use std::error; use std::signer; use std::vector; + use aptos_framework::permissioned_signer; use aptos_framework::stake; use aptos_framework::staking_contract; use aptos_framework::vesting; + struct StakeProxyPermission has copy, drop, store {} + + /// Signer does not have permission to perform stake proxy logic. + const ENO_STAKE_PERMISSION: u64 = 28; + + /// Permissions + inline fun check_stake_proxy_permission(s: &signer) { + assert!( + permissioned_signer::check_permission_exists(s, StakeProxyPermission {}), + error::permission_denied(ENO_STAKE_PERMISSION), + ); + } + + /// Grant permission to mutate staking on behalf of the master signer. 
+ public fun grant_permission(master: &signer, permissioned_signer: &signer) { + permissioned_signer::authorize_unlimited(master, permissioned_signer, StakeProxyPermission {}) + } + public entry fun set_operator(owner: &signer, old_operator: address, new_operator: address) { set_vesting_contract_operator(owner, old_operator, new_operator); set_staking_contract_operator(owner, old_operator, new_operator); @@ -19,6 +39,7 @@ module aptos_framework::staking_proxy { } public entry fun set_vesting_contract_operator(owner: &signer, old_operator: address, new_operator: address) { + check_stake_proxy_permission(owner); let owner_address = signer::address_of(owner); let vesting_contracts = &vesting::vesting_contracts(owner_address); vector::for_each_ref(vesting_contracts, |vesting_contract| { @@ -31,6 +52,7 @@ module aptos_framework::staking_proxy { } public entry fun set_staking_contract_operator(owner: &signer, old_operator: address, new_operator: address) { + check_stake_proxy_permission(owner); let owner_address = signer::address_of(owner); if (staking_contract::staking_contract_exists(owner_address, old_operator)) { let current_commission_percentage = staking_contract::commission_percentage(owner_address, old_operator); @@ -39,6 +61,7 @@ module aptos_framework::staking_proxy { } public entry fun set_stake_pool_operator(owner: &signer, new_operator: address) { + check_stake_proxy_permission(owner); let owner_address = signer::address_of(owner); if (stake::stake_pool_exists(owner_address)) { stake::set_operator(owner, new_operator); @@ -46,6 +69,7 @@ module aptos_framework::staking_proxy { } public entry fun set_vesting_contract_voter(owner: &signer, operator: address, new_voter: address) { + check_stake_proxy_permission(owner); let owner_address = signer::address_of(owner); let vesting_contracts = &vesting::vesting_contracts(owner_address); vector::for_each_ref(vesting_contracts, |vesting_contract| { @@ -57,6 +81,7 @@ module aptos_framework::staking_proxy { } public entry fun set_staking_contract_voter(owner: &signer, operator: address, new_voter: address) { + check_stake_proxy_permission(owner); let owner_address = signer::address_of(owner); if (staking_contract::staking_contract_exists(owner_address, operator)) { staking_contract::update_voter(owner, operator, new_voter); @@ -64,6 +89,7 @@ module aptos_framework::staking_proxy { } public entry fun set_stake_pool_voter(owner: &signer, new_voter: address) { + check_stake_proxy_permission(owner); if (stake::stake_pool_exists(signer::address_of(owner))) { stake::set_delegated_voter(owner, new_voter); }; diff --git a/aptos-move/framework/aptos-framework/sources/staking_proxy.spec.move b/aptos-move/framework/aptos-framework/sources/staking_proxy.spec.move index a1da120f0eed4..9f72369b39c0c 100644 --- a/aptos-move/framework/aptos-framework/sources/staking_proxy.spec.move +++ b/aptos-move/framework/aptos-framework/sources/staking_proxy.spec.move @@ -41,7 +41,14 @@ spec aptos_framework::staking_proxy { /// spec module { pragma verify = true; - pragma aborts_if_is_strict; + pragma aborts_if_is_partial; + } + + spec grant_permission { + pragma aborts_if_is_partial; + aborts_if !permissioned_signer::spec_is_permissioned_signer(permissioned_signer); + aborts_if permissioned_signer::spec_is_permissioned_signer(master); + aborts_if signer::address_of(master) != signer::address_of(permissioned_signer); } /// Aborts if conditions of SetStakePoolOperator are not met @@ -58,6 +65,7 @@ spec aptos_framework::staking_proxy { spec set_voter(owner: &signer, operator: 
address, new_voter: address) { // TODO: Can't verify `set_vesting_contract_voter` pragma aborts_if_is_partial; + pragma verify_duration_estimate = 120; include SetStakingContractVoter; include SetStakePoolVoterAbortsIf; } @@ -122,12 +130,21 @@ spec aptos_framework::staking_proxy { /// One of them are not exists spec set_stake_pool_operator(owner: &signer, new_operator: address) { include SetStakePoolOperator; + include AbortsIfSignerPermissionStakeProxy { + s: owner + }; + include exists(signer::address_of(owner)) ==> stake::AbortsIfSignerPermissionStake { + s:owner + }; } spec schema SetStakePoolOperator { owner: &signer; new_operator: address; + include AbortsIfSignerPermissionStakeProxy { + s: owner + }; let owner_address = signer::address_of(owner); let ownership_cap = borrow_global(owner_address); let pool_address = ownership_cap.pool_address; @@ -137,6 +154,9 @@ spec aptos_framework::staking_proxy { spec set_staking_contract_voter(owner: &signer, operator: address, new_voter: address) { include SetStakingContractVoter; + include AbortsIfSignerPermissionStakeProxy { + s: owner + }; } /// Make sure staking_contract_exists first @@ -166,16 +186,32 @@ spec aptos_framework::staking_proxy { spec set_stake_pool_voter(owner: &signer, new_voter: address) { include SetStakePoolVoterAbortsIf; + include AbortsIfSignerPermissionStakeProxy { + s: owner + }; + include exists(signer::address_of(owner)) ==> stake::AbortsIfSignerPermissionStake { + s:owner + }; } spec schema SetStakePoolVoterAbortsIf { owner: &signer; new_voter: address; + include AbortsIfSignerPermissionStakeProxy { + s: owner + }; let owner_address = signer::address_of(owner); let ownership_cap = global(owner_address); let pool_address = ownership_cap.pool_address; aborts_if stake::stake_pool_exists(owner_address) && !(exists(owner_address) && stake::stake_pool_exists(pool_address)); ensures stake::stake_pool_exists(owner_address) ==> global(pool_address).delegated_voter == new_voter; } + + spec schema AbortsIfSignerPermissionStakeProxy { + use aptos_framework::permissioned_signer; + s: signer; + let perm = StakeProxyPermission {}; + aborts_if !permissioned_signer::spec_check_permission_exists(s, perm); + } } diff --git a/aptos-move/framework/aptos-framework/sources/system_addresses.move b/aptos-move/framework/aptos-framework/sources/system_addresses.move index 038e1552cb5a9..49b82099aad4f 100644 --- a/aptos-move/framework/aptos-framework/sources/system_addresses.move +++ b/aptos-move/framework/aptos-framework/sources/system_addresses.move @@ -1,9 +1,6 @@ module aptos_framework::system_addresses { use std::error; use std::signer; - use std::features::get_decommission_core_resources_enabled; - #[test_only] - use std::features::change_feature_flags_for_testing; /// The address/account did not correspond to the core resource address const ENOT_CORE_RESOURCE_ADDRESS: u64 = 1; @@ -23,12 +20,7 @@ module aptos_framework::system_addresses { } public fun is_core_resource_address(addr: address): bool { - // Check if the feature flag for decommissioning core resources is enabled. 
- if (get_decommission_core_resources_enabled()) { - false - } else { - addr == @core_resources - } + addr == @core_resources } public fun assert_aptos_framework(account: &signer) { @@ -87,22 +79,4 @@ module aptos_framework::system_addresses { public fun is_reserved_address(addr: address): bool { is_aptos_framework_address(addr) || is_vm_address(addr) } - - #[test(aptos_framework = @0x1, core_resources = @0xA550C18)] - public entry fun test_core_resource_check_returns_false_with_flag_enabled(aptos_framework: signer, core_resources: address) { - // Enable the feature flag for testing - change_feature_flags_for_testing(&aptos_framework, vector[222], vector[]); - - // Assert that is_core_resource_address returns false - assert!(!is_core_resource_address(core_resources), 0); - } - - #[test(aptos_framework = @0x1, core_resources = @0xA550C18)] - public entry fun test_core_resource_check_returns_true_without_flag(aptos_framework: signer, core_resources: address) { - // Disable the feature flag for testing - change_feature_flags_for_testing(&aptos_framework, vector[], vector[222]); - - // Assert that is_core_resource_address returns true - assert!(is_core_resource_address(core_resources), 0); - } } diff --git a/aptos-move/framework/aptos-framework/sources/transaction_context.move b/aptos-move/framework/aptos-framework/sources/transaction_context.move index c3bad25371cdc..74ce7c1140db2 100644 --- a/aptos-move/framework/aptos-framework/sources/transaction_context.move +++ b/aptos-move/framework/aptos-framework/sources/transaction_context.move @@ -182,6 +182,23 @@ module aptos_framework::transaction_context { payload.entry_function_payload } + #[test_only] + public fun new_entry_function_payload( + account_address: address, + module_name: String, + function_name: String, + ty_args_names: vector, + args: vector>, + ): EntryFunctionPayload { + EntryFunctionPayload { + account_address, + module_name, + function_name, + ty_args_names, + args, + } + } + #[test()] fun test_auid_uniquess() { use std::vector; diff --git a/aptos-move/framework/aptos-framework/sources/transaction_context.spec.move b/aptos-move/framework/aptos-framework/sources/transaction_context.spec.move index f9837e26e6a75..07487cb0919ae 100644 --- a/aptos-move/framework/aptos-framework/sources/transaction_context.spec.move +++ b/aptos-move/framework/aptos-framework/sources/transaction_context.spec.move @@ -58,11 +58,13 @@ spec aptos_framework::transaction_context { } spec generate_unique_address(): address { pragma opaque; + aborts_if [abstract] false; ensures [abstract] result == spec_generate_unique_address(); } spec fun spec_generate_unique_address(): address; spec generate_auid_address(): address { pragma opaque; + aborts_if [abstract] false; // property 3: Generating the unique address should return a vector with 32 bytes, if the auid feature flag is enabled. /// [high-level-req-3] ensures [abstract] result == spec_generate_unique_address(); diff --git a/aptos-move/framework/aptos-framework/sources/transaction_fee.move b/aptos-move/framework/aptos-framework/sources/transaction_fee.move index 1a4704e7814c5..677c134411563 100644 --- a/aptos-move/framework/aptos-framework/sources/transaction_fee.move +++ b/aptos-move/framework/aptos-framework/sources/transaction_fee.move @@ -1,9 +1,8 @@ -/// This module provides an interface to burn or collect and redistribute transaction fees. +// This module provides an interface to burn or collect and redistribute transaction fees. 
module aptos_framework::transaction_fee { - use aptos_framework::coin::{Self, AggregatableCoin, BurnCapability, Coin, MintCapability}; + use aptos_framework::coin::{Self, AggregatableCoin, BurnCapability, MintCapability}; use aptos_framework::aptos_account; use aptos_framework::aptos_coin::AptosCoin; - use aptos_framework::stake; use aptos_framework::fungible_asset::BurnRef; use aptos_framework::system_addresses; use std::error; @@ -29,15 +28,6 @@ module aptos_framework::transaction_fee { const EFA_GAS_CHARGING_NOT_ENABLED: u64 = 5; - const EATOMIC_BRIDGE_NOT_ENABLED: u64 = 6; - - const ECOPY_CAPS_SHOT: u64 = 7; - - const ENATIVE_BRIDGE_NOT_ENABLED: u64 = 8; - - /// The one shot copy capabilities call - struct CopyCapabilitiesOneShot has key {} - /// Stores burn capability to burn the gas fees. struct AptosCoinCapabilities has key { burn_cap: BurnCapability, @@ -53,14 +43,6 @@ module aptos_framework::transaction_fee { mint_cap: MintCapability, } - /// Stores information about the block proposer and the amount of fees - /// collected when executing the block. - struct CollectedFeesPerBlock has key { - amount: AggregatableCoin, - proposer: Option
, - burn_percentage: u8, - } - #[event] /// Breakdown of fee charge and refund for a transaction. /// The structure is: @@ -94,168 +76,19 @@ module aptos_framework::transaction_fee { storage_fee_refund_octas: u64, } - /// Initializes the resource storing information about gas fees collection and - /// distribution. Should be called by on-chain governance. - public fun initialize_fee_collection_and_distribution(aptos_framework: &signer, burn_percentage: u8) { - system_addresses::assert_aptos_framework(aptos_framework); - assert!( - !exists(@aptos_framework), - error::already_exists(EALREADY_COLLECTING_FEES) - ); - assert!(burn_percentage <= 100, error::out_of_range(EINVALID_BURN_PERCENTAGE)); - - // Make sure stakng module is aware of transaction fees collection. - stake::initialize_validator_fees(aptos_framework); - - // Initially, no fees are collected and the block proposer is not set. - let collected_fees = CollectedFeesPerBlock { - amount: coin::initialize_aggregatable_coin(aptos_framework), - proposer: option::none(), - burn_percentage, - }; - move_to(aptos_framework, collected_fees); - } - - fun is_fees_collection_enabled(): bool { - exists(@aptos_framework) - } - - /// Sets the burn percentage for collected fees to a new value. Should be called by on-chain governance. - public fun upgrade_burn_percentage( - aptos_framework: &signer, - new_burn_percentage: u8 - ) acquires AptosCoinCapabilities, CollectedFeesPerBlock { - system_addresses::assert_aptos_framework(aptos_framework); - assert!(new_burn_percentage <= 100, error::out_of_range(EINVALID_BURN_PERCENTAGE)); - - // Prior to upgrading the burn percentage, make sure to process collected - // fees. Otherwise we would use the new (incorrect) burn_percentage when - // processing fees later! - process_collected_fees(); - - if (is_fees_collection_enabled()) { - // Upgrade has no effect unless fees are being collected. - let burn_percentage = &mut borrow_global_mut(@aptos_framework).burn_percentage; - *burn_percentage = new_burn_percentage - } - } - - /// Registers the proposer of the block for gas fees collection. This function - /// can only be called at the beginning of the block. - public(friend) fun register_proposer_for_fee_collection(proposer_addr: address) acquires CollectedFeesPerBlock { - if (is_fees_collection_enabled()) { - let collected_fees = borrow_global_mut(@aptos_framework); - let _ = option::swap_or_fill(&mut collected_fees.proposer, proposer_addr); - } - } - - /// Burns a specified fraction of the coin. - fun burn_coin_fraction(coin: &mut Coin, burn_percentage: u8) acquires AptosCoinCapabilities { - assert!(burn_percentage <= 100, error::out_of_range(EINVALID_BURN_PERCENTAGE)); - - let collected_amount = coin::value(coin); - spec { - // We assume that `burn_percentage * collected_amount` does not overflow. - assume burn_percentage * collected_amount <= MAX_U64; - }; - let amount_to_burn = (burn_percentage as u64) * collected_amount / 100; - if (amount_to_burn > 0) { - let coin_to_burn = coin::extract(coin, amount_to_burn); - coin::burn( - coin_to_burn, - &borrow_global(@aptos_framework).burn_cap, - ); - } - } - - /// Calculates the fee which should be distributed to the block proposer at the - /// end of an epoch, and records it in the system. This function can only be called - /// at the beginning of the block or during reconfiguration. 
- public(friend) fun process_collected_fees() acquires AptosCoinCapabilities, CollectedFeesPerBlock { - if (!is_fees_collection_enabled()) { - return - }; - let collected_fees = borrow_global_mut(@aptos_framework); - - // If there are no collected fees, only unset the proposer. See the rationale for - // setting proposer to option::none() below. - if (coin::is_aggregatable_coin_zero(&collected_fees.amount)) { - if (option::is_some(&collected_fees.proposer)) { - let _ = option::extract(&mut collected_fees.proposer); - }; - return - }; - - // Otherwise get the collected fee, and check if it can distributed later. - let coin = coin::drain_aggregatable_coin(&mut collected_fees.amount); - if (option::is_some(&collected_fees.proposer)) { - // Extract the address of proposer here and reset it to option::none(). This - // is particularly useful to avoid any undesired side-effects where coins are - // collected but never distributed or distributed to the wrong account. - // With this design, processing collected fees enforces that all fees will be burnt - // unless the proposer is specified in the block prologue. When we have a governance - // proposal that triggers reconfiguration, we distribute pending fees and burn the - // fee for the proposal. Otherwise, that fee would be leaked to the next block. - let proposer = option::extract(&mut collected_fees.proposer); - - // Since the block can be produced by the VM itself, we have to make sure we catch - // this case. - if (proposer == @vm_reserved) { - burn_coin_fraction(&mut coin, 100); - coin::destroy_zero(coin); - return - }; - - burn_coin_fraction(&mut coin, collected_fees.burn_percentage); - stake::add_transaction_fee(proposer, coin); - return - }; - - // If checks did not pass, simply burn all collected coins and return none. - burn_coin_fraction(&mut coin, 100); - coin::destroy_zero(coin) - } - - /// Burns a specified amount of AptosCoin from an address. - /// - /// @param core_resource The signer representing the core resource account. - /// @param account The address from which to burn AptosCoin. - /// @param fee The amount of AptosCoin to burn. - /// @abort If the burn capability is not available. - public fun burn_from(aptos_framework: &signer, account: address, fee: u64) acquires AptosFABurnCapabilities, AptosCoinCapabilities { - system_addresses::assert_aptos_framework(aptos_framework); - if (exists(@aptos_framework)) { - let burn_ref = &borrow_global(@aptos_framework).burn_ref; - aptos_account::burn_from_fungible_store(burn_ref, account, fee); - } else { - let burn_cap = &borrow_global(@aptos_framework).burn_cap; - if (features::operations_default_to_fa_apt_store_enabled()) { - let (burn_ref, burn_receipt) = coin::get_paired_burn_ref(burn_cap); - aptos_account::burn_from_fungible_store(&burn_ref, account, fee); - coin::return_paired_burn_ref(burn_ref, burn_receipt); - } else { - coin::burn_from( - account, - fee, - burn_cap, - ); - }; - }; - } - /// Burn transaction fees in epilogue. 
public(friend) fun burn_fee(account: address, fee: u64) acquires AptosFABurnCapabilities, AptosCoinCapabilities { if (exists(@aptos_framework)) { let burn_ref = &borrow_global(@aptos_framework).burn_ref; - aptos_account::burn_from_fungible_store(burn_ref, account, fee); + aptos_account::burn_from_fungible_store_for_gas(burn_ref, account, fee); } else { let burn_cap = &borrow_global(@aptos_framework).burn_cap; if (features::operations_default_to_fa_apt_store_enabled()) { let (burn_ref, burn_receipt) = coin::get_paired_burn_ref(burn_cap); - aptos_account::burn_from_fungible_store(&burn_ref, account, fee); + aptos_account::burn_from_fungible_store_for_gas(&burn_ref, account, fee); coin::return_paired_burn_ref(burn_ref, burn_receipt); } else { - coin::burn_from( + coin::burn_from_for_gas( account, fee, burn_cap, @@ -268,18 +101,7 @@ module aptos_framework::transaction_fee { public(friend) fun mint_and_refund(account: address, refund: u64) acquires AptosCoinMintCapability { let mint_cap = &borrow_global(@aptos_framework).mint_cap; let refund_coin = coin::mint(refund, mint_cap); - coin::force_deposit(account, refund_coin); - } - - /// Collect transaction fees in epilogue. - public(friend) fun collect_fee(account: address, fee: u64) acquires CollectedFeesPerBlock { - let collected_fees = borrow_global_mut(@aptos_framework); - - // Here, we are always optimistic and always collect fees. If the proposer is not set, - // or we cannot redistribute fees later for some reason (e.g. account cannot receive AptoCoin) - // we burn them all at once. This way we avoid having a check for every transaction epilogue. - let collected_amount = &mut collected_fees.amount; - coin::collect_into_aggregatable_coin(account, fee, collected_amount); + coin::deposit_for_gas_fee(account, refund_coin); } /// Only called during genesis. @@ -310,266 +132,39 @@ module aptos_framework::transaction_fee { move_to(aptos_framework, AptosCoinMintCapability { mint_cap }) } - /// Copy Mint and Burn capabilities over to bridge - /// Can only be called once after which it will assert - public fun copy_capabilities_for_bridge(aptos_framework: &signer) : (MintCapability, BurnCapability) - acquires AptosCoinCapabilities, AptosCoinMintCapability { - system_addresses::assert_aptos_framework(aptos_framework); - assert!(features::abort_atomic_bridge_enabled(), EATOMIC_BRIDGE_NOT_ENABLED); - assert!(!exists(@aptos_framework), ECOPY_CAPS_SHOT); - move_to(aptos_framework, CopyCapabilitiesOneShot{}); - ( - borrow_global(@aptos_framework).mint_cap, - borrow_global(@aptos_framework).burn_cap - ) - } - - /// Copy Mint and Burn capabilities over to bridge - /// Can only be called once after which it will assert - public fun copy_capabilities_for_native_bridge(aptos_framework: &signer) : (MintCapability, BurnCapability) - acquires AptosCoinCapabilities, AptosCoinMintCapability { - system_addresses::assert_aptos_framework(aptos_framework); - assert!(features::abort_native_bridge_enabled(), ENATIVE_BRIDGE_NOT_ENABLED); - assert!(!exists(@aptos_framework), ECOPY_CAPS_SHOT); - move_to(aptos_framework, CopyCapabilitiesOneShot{}); - ( - borrow_global(@aptos_framework).mint_cap, - borrow_global(@aptos_framework).burn_cap - ) - } - - #[deprecated] - public fun initialize_storage_refund(_: &signer) { - abort error::not_implemented(ENO_LONGER_SUPPORTED) - } - // Called by the VM after epilogue. 
fun emit_fee_statement(fee_statement: FeeStatement) { event::emit(fee_statement) } - #[test_only] - use aptos_framework::aggregator_factory; - #[test_only] - use aptos_framework::object; - - #[test(aptos_framework = @aptos_framework)] - fun test_initialize_fee_collection_and_distribution(aptos_framework: signer) acquires CollectedFeesPerBlock { - aggregator_factory::initialize_aggregator_factory_for_test(&aptos_framework); - initialize_fee_collection_and_distribution(&aptos_framework, 25); - - // Check struct has been published. - assert!(exists(@aptos_framework), 0); - - // Check that initial balance is 0 and there is no proposer set. - let collected_fees = borrow_global(@aptos_framework); - assert!(coin::is_aggregatable_coin_zero(&collected_fees.amount), 0); - assert!(option::is_none(&collected_fees.proposer), 0); - assert!(collected_fees.burn_percentage == 25, 0); - } - - #[test(aptos_framework = @aptos_framework)] - fun test_burn_fraction_calculation(aptos_framework: signer) acquires AptosCoinCapabilities { - use aptos_framework::aptos_coin; - let (burn_cap, mint_cap) = aptos_coin::initialize_for_test(&aptos_framework); - store_aptos_coin_burn_cap(&aptos_framework, burn_cap); - - let c1 = coin::mint(100, &mint_cap); - assert!(*option::borrow(&coin::supply()) == 100, 0); - - // Burning 25%. - burn_coin_fraction(&mut c1, 25); - assert!(coin::value(&c1) == 75, 0); - assert!(*option::borrow(&coin::supply()) == 75, 0); - - // Burning 0%. - burn_coin_fraction(&mut c1, 0); - assert!(coin::value(&c1) == 75, 0); - assert!(*option::borrow(&coin::supply()) == 75, 0); - - // Burning remaining 100%. - burn_coin_fraction(&mut c1, 100); - assert!(coin::value(&c1) == 0, 0); - assert!(*option::borrow(&coin::supply()) == 0, 0); - - coin::destroy_zero(c1); - let c2 = coin::mint(10, &mint_cap); - assert!(*option::borrow(&coin::supply()) == 10, 0); - - burn_coin_fraction(&mut c2, 5); - assert!(coin::value(&c2) == 10, 0); - assert!(*option::borrow(&coin::supply()) == 10, 0); - - burn_coin_fraction(&mut c2, 100); - coin::destroy_zero(c2); - coin::destroy_burn_cap(burn_cap); - coin::destroy_mint_cap(mint_cap); - } - - #[test(aptos_framework = @aptos_framework, alice = @0xa11ce, bob = @0xb0b, carol = @0xca101)] - fun test_fees_distribution( - aptos_framework: signer, - alice: signer, - bob: signer, - carol: signer, - ) acquires AptosCoinCapabilities, CollectedFeesPerBlock { - use std::signer; - use aptos_framework::aptos_account; - use aptos_framework::aptos_coin; - - // Initialization. - let (burn_cap, mint_cap) = aptos_coin::initialize_for_test(&aptos_framework); - store_aptos_coin_burn_cap(&aptos_framework, burn_cap); - initialize_fee_collection_and_distribution(&aptos_framework, 10); - - // Create dummy accounts. - let alice_addr = signer::address_of(&alice); - let bob_addr = signer::address_of(&bob); - let carol_addr = signer::address_of(&carol); - aptos_account::create_account(alice_addr); - aptos_account::create_account(bob_addr); - aptos_account::create_account(carol_addr); - assert!(object::object_address(&coin::ensure_paired_metadata()) == @aptos_fungible_asset, 0); - coin::deposit(alice_addr, coin::mint(10000, &mint_cap)); - coin::deposit(bob_addr, coin::mint(10000, &mint_cap)); - coin::deposit(carol_addr, coin::mint(10000, &mint_cap)); - assert!(*option::borrow(&coin::supply()) == 30000, 0); - - // Block 1 starts. - process_collected_fees(); - register_proposer_for_fee_collection(alice_addr); - - // Check that there was no fees distribution in the first block. 
- let collected_fees = borrow_global(@aptos_framework); - assert!(coin::is_aggregatable_coin_zero(&collected_fees.amount), 0); - assert!(*option::borrow(&collected_fees.proposer) == alice_addr, 0); - assert!(*option::borrow(&coin::supply()) == 30000, 0); - - // Simulate transaction fee collection - here we simply collect some fees from Bob. - collect_fee(bob_addr, 100); - collect_fee(bob_addr, 500); - collect_fee(bob_addr, 400); - - // Now Bob must have 1000 less in his account. Alice and Carol have the same amounts. - assert!(coin::balance(alice_addr) == 10000, 0); - assert!(coin::balance(bob_addr) == 9000, 0); - assert!(coin::balance(carol_addr) == 10000, 0); - - // Block 2 starts. - process_collected_fees(); - register_proposer_for_fee_collection(bob_addr); - - // Collected fees from Bob must have been assigned to Alice. - assert!(stake::get_validator_fee(alice_addr) == 900, 0); - assert!(coin::balance(alice_addr) == 10000, 0); - assert!(coin::balance(bob_addr) == 9000, 0); - assert!(coin::balance(carol_addr) == 10000, 0); - - // Also, aggregator coin is drained and total supply is slightly changed (10% of 1000 is burnt). - let collected_fees = borrow_global(@aptos_framework); - assert!(coin::is_aggregatable_coin_zero(&collected_fees.amount), 0); - assert!(*option::borrow(&collected_fees.proposer) == bob_addr, 0); - assert!(*option::borrow(&coin::supply()) == 29900, 0); - - // Simulate transaction fee collection one more time. - collect_fee(bob_addr, 5000); - collect_fee(bob_addr, 4000); - - assert!(coin::balance(alice_addr) == 10000, 0); - assert!(coin::balance(bob_addr) == 0, 0); - assert!(coin::balance(carol_addr) == 10000, 0); - - // Block 3 starts. - process_collected_fees(); - register_proposer_for_fee_collection(carol_addr); - - // Collected fees should have been assigned to Bob because he was the peoposer. - assert!(stake::get_validator_fee(alice_addr) == 900, 0); - assert!(coin::balance(alice_addr) == 10000, 0); - assert!(stake::get_validator_fee(bob_addr) == 8100, 0); - assert!(coin::balance(bob_addr) == 0, 0); - assert!(coin::balance(carol_addr) == 10000, 0); - - // Again, aggregator coin is drained and total supply is changed by 10% of 9000. - let collected_fees = borrow_global(@aptos_framework); - assert!(coin::is_aggregatable_coin_zero(&collected_fees.amount), 0); - assert!(*option::borrow(&collected_fees.proposer) == carol_addr, 0); - assert!(*option::borrow(&coin::supply()) == 29000, 0); - - // Simulate transaction fee collection one last time. - collect_fee(alice_addr, 1000); - collect_fee(alice_addr, 1000); - - // Block 4 starts. - process_collected_fees(); - register_proposer_for_fee_collection(alice_addr); - - // Check that 2000 was collected from Alice. - assert!(coin::balance(alice_addr) == 8000, 0); - assert!(coin::balance(bob_addr) == 0, 0); - - // Carol must have some fees assigned now. 
- let collected_fees = borrow_global(@aptos_framework); - assert!(stake::get_validator_fee(carol_addr) == 1800, 0); - assert!(coin::is_aggregatable_coin_zero(&collected_fees.amount), 0); - assert!(*option::borrow(&collected_fees.proposer) == alice_addr, 0); - assert!(*option::borrow(&coin::supply()) == 28800, 0); - - coin::destroy_burn_cap(burn_cap); - coin::destroy_mint_cap(mint_cap); - } - - #[test_only] - fun setup_coin_caps(aptos_framework: &signer) { - use aptos_framework::aptos_coin; - let (burn_cap, mint_cap) = aptos_coin::initialize_for_test(aptos_framework); - store_aptos_coin_burn_cap(aptos_framework, burn_cap); - store_aptos_coin_mint_cap(aptos_framework, mint_cap); - } + // DEPRECATED section: - #[test_only] - fun setup_atomic_bridge(aptos_framework: &signer) { - features::change_feature_flags_for_testing( - aptos_framework, - vector[features::get_atomic_bridge_feature()], - vector[] - ); + #[deprecated] + /// DEPRECATED: Stores information about the block proposer and the amount of fees + /// collected when executing the block. + struct CollectedFeesPerBlock has key { + amount: AggregatableCoin, + proposer: Option
, + burn_percentage: u8, } - #[test(aptos_framework = @aptos_framework)] - fun test_copy_capabilities(aptos_framework: &signer) acquires AptosCoinCapabilities, AptosCoinMintCapability { - setup_coin_caps(aptos_framework); - setup_atomic_bridge(aptos_framework); - - let (mint_cap, burn_cap) = copy_capabilities_for_bridge(aptos_framework); - coin::destroy_burn_cap(burn_cap); - coin::destroy_mint_cap(mint_cap); + #[deprecated] + /// DEPRECATED + public fun initialize_fee_collection_and_distribution(_aptos_framework: &signer, _burn_percentage: u8) { + abort error::not_implemented(ENO_LONGER_SUPPORTED) } - #[test(aptos_framework = @aptos_framework)] - #[expected_failure(abort_code = EATOMIC_BRIDGE_NOT_ENABLED, location = Self)] - fun test_copy_capabilities_no_bridge(aptos_framework: &signer) acquires AptosCoinCapabilities, AptosCoinMintCapability { - setup_coin_caps(aptos_framework); - features::change_feature_flags_for_testing( - aptos_framework, - vector[], - vector[features::get_atomic_bridge_feature()], - ); - let (mint_cap, burn_cap) = copy_capabilities_for_bridge(aptos_framework); - coin::destroy_burn_cap(burn_cap); - coin::destroy_mint_cap(mint_cap); + #[deprecated] + /// DEPRECATED + public fun upgrade_burn_percentage( + _aptos_framework: &signer, + _new_burn_percentage: u8 + ) { + abort error::not_implemented(ENO_LONGER_SUPPORTED) } - #[test(aptos_framework = @aptos_framework)] - #[expected_failure(abort_code = ECOPY_CAPS_SHOT, location = Self)] - fun test_copy_capabilities_one_too_many_shots(aptos_framework: &signer) acquires AptosCoinCapabilities, AptosCoinMintCapability { - setup_coin_caps(aptos_framework); - setup_atomic_bridge(aptos_framework); - let (mint_cap, burn_cap) = copy_capabilities_for_bridge(aptos_framework); - coin::destroy_burn_cap(burn_cap); - coin::destroy_mint_cap(mint_cap); - let (mint_cap, burn_cap) = copy_capabilities_for_bridge(aptos_framework); - coin::destroy_burn_cap(burn_cap); - coin::destroy_mint_cap(mint_cap); + #[deprecated] + public fun initialize_storage_refund(_: &signer) { + abort error::not_implemented(ENO_LONGER_SUPPORTED) } } diff --git a/aptos-move/framework/aptos-framework/sources/transaction_fee.spec.move b/aptos-move/framework/aptos-framework/sources/transaction_fee.spec.move index e11883f480788..7571861410c0a 100644 --- a/aptos-move/framework/aptos-framework/sources/transaction_fee.spec.move +++ b/aptos-move/framework/aptos-framework/sources/transaction_fee.spec.move @@ -67,132 +67,7 @@ spec aptos_framework::transaction_fee { invariant burn_percentage <= 100; } - spec initialize_fee_collection_and_distribution(aptos_framework: &signer, burn_percentage: u8) { - use std::signer; - use aptos_framework::stake::ValidatorFees; - use aptos_framework::aggregator_factory; - use aptos_framework::system_addresses; - - // property 2: The initialization function may only be called once. - /// [high-level-req-2] - aborts_if exists(@aptos_framework); - aborts_if burn_percentage > 100; - - let aptos_addr = signer::address_of(aptos_framework); - // property 3: Only the admin address is authorized to call the initialization function. 
- /// [high-level-req-3] - aborts_if !system_addresses::is_aptos_framework_address(aptos_addr); - aborts_if exists(aptos_addr); - - include system_addresses::AbortsIfNotAptosFramework { account: aptos_framework }; - include aggregator_factory::CreateAggregatorInternalAbortsIf; - aborts_if exists(aptos_addr); - - ensures exists(aptos_addr); - ensures exists(aptos_addr); - } - - spec upgrade_burn_percentage(aptos_framework: &signer, new_burn_percentage: u8) { - use std::signer; - - // Percentage validation - aborts_if new_burn_percentage > 100; - // Signer validation - let aptos_addr = signer::address_of(aptos_framework); - aborts_if !system_addresses::is_aptos_framework_address(aptos_addr); - - // property 5: Prior to upgrading the burn percentage, it must process all the fees collected up to that point. - // property 6: Ensure the presence of the resource. - // Requirements and ensures conditions of `process_collected_fees` - /// [high-level-req-5] - /// [high-level-req-6.3] - include ProcessCollectedFeesRequiresAndEnsures; - - // The effect of upgrading the burn percentage - ensures exists(@aptos_framework) ==> - global(@aptos_framework).burn_percentage == new_burn_percentage; - } - - spec register_proposer_for_fee_collection(proposer_addr: address) { - aborts_if false; - // property 6: Ensure the presence of the resource. - /// [high-level-req-6.1] - ensures is_fees_collection_enabled() ==> - option::spec_borrow(global(@aptos_framework).proposer) == proposer_addr; - } - - spec burn_coin_fraction(coin: &mut Coin, burn_percentage: u8) { - use aptos_framework::coin::CoinInfo; - use aptos_framework::aptos_coin::AptosCoin; - - requires burn_percentage <= 100; - requires exists(@aptos_framework); - requires exists>(@aptos_framework); - - let amount_to_burn = (burn_percentage * coin::value(coin)) / 100; - // include (amount_to_burn > 0) ==> coin::AbortsIfNotExistCoinInfo; - include amount_to_burn > 0 ==> coin::CoinSubAbortsIf { amount: amount_to_burn }; - ensures coin.value == old(coin).value - amount_to_burn; - } - - spec fun collectedFeesAggregator(): AggregatableCoin { - global(@aptos_framework).amount - } - - spec schema RequiresCollectedFeesPerValueLeqBlockAptosSupply { - use aptos_framework::optional_aggregator; - use aptos_framework::aggregator; - let maybe_supply = coin::get_coin_supply_opt(); - // property 6: Ensure the presence of the resource. 
- requires - (is_fees_collection_enabled() && option::is_some(maybe_supply)) ==> - (aggregator::spec_aggregator_get_val(global(@aptos_framework).amount.value) <= - optional_aggregator::optional_aggregator_value( - option::spec_borrow(coin::get_coin_supply_opt()) - )); - } - - spec schema ProcessCollectedFeesRequiresAndEnsures { - use aptos_framework::coin::CoinInfo; - use aptos_framework::aptos_coin::AptosCoin; - use aptos_framework::aggregator; - use aptos_std::table; - - requires exists(@aptos_framework); - requires exists(@aptos_framework); - requires exists>(@aptos_framework); - include RequiresCollectedFeesPerValueLeqBlockAptosSupply; - - aborts_if false; - - let collected_fees = global(@aptos_framework); - let post post_collected_fees = global(@aptos_framework); - let pre_amount = aggregator::spec_aggregator_get_val(collected_fees.amount.value); - let post post_amount = aggregator::spec_aggregator_get_val(post_collected_fees.amount.value); - let fees_table = global(@aptos_framework).fees_table; - let post post_fees_table = global(@aptos_framework).fees_table; - let proposer = option::spec_borrow(collected_fees.proposer); - let fee_to_add = pre_amount - pre_amount * collected_fees.burn_percentage / 100; - ensures is_fees_collection_enabled() ==> option::spec_is_none(post_collected_fees.proposer) && post_amount == 0; - ensures is_fees_collection_enabled() && aggregator::spec_read(collected_fees.amount.value) > 0 && - option::spec_is_some(collected_fees.proposer) ==> - if (proposer != @vm_reserved) { - if (table::spec_contains(fees_table, proposer)) { - table::spec_get(post_fees_table, proposer).value == table::spec_get( - fees_table, - proposer - ).value + fee_to_add - } else { - table::spec_get(post_fees_table, proposer).value == fee_to_add - } - } else { - option::spec_is_none(post_collected_fees.proposer) && post_amount == 0 - }; - } - - spec process_collected_fees() { - /// [high-level-req-6.2] - include ProcessCollectedFeesRequiresAndEnsures; + spec initialize_fee_collection_and_distribution(_aptos_framework: &signer, _burn_percentage: u8) { } /// `AptosCoinCapabilities` should be exists. @@ -264,30 +139,6 @@ spec aptos_framework::transaction_fee { ensures post_supply == supply + refund; } - spec collect_fee(account: address, fee: u64) { - use aptos_framework::aggregator; - // TODO(fa_migration) - pragma verify = false; - - let collected_fees = global(@aptos_framework).amount; - let aggr = collected_fees.value; - let coin_store = global>(account); - aborts_if !exists(@aptos_framework); - aborts_if fee > 0 && !exists>(account); - aborts_if fee > 0 && coin_store.coin.value < fee; - aborts_if fee > 0 && aggregator::spec_aggregator_get_val(aggr) - + fee > aggregator::spec_get_limit(aggr); - aborts_if fee > 0 && aggregator::spec_aggregator_get_val(aggr) - + fee > MAX_U128; - - let post post_coin_store = global>(account); - let post post_collected_fees = global(@aptos_framework).amount; - ensures post_coin_store.coin.value == coin_store.coin.value - fee; - ensures aggregator::spec_aggregator_get_val(post_collected_fees.value) == aggregator::spec_aggregator_get_val( - aggr - ) + fee; - } - /// Ensure caller is admin. /// Aborts if `AptosCoinCapabilities` already exists. 
spec store_aptos_coin_burn_cap(aptos_framework: &signer, burn_cap: BurnCapability) { diff --git a/aptos-move/framework/aptos-framework/sources/transaction_validation.move b/aptos-move/framework/aptos-framework/sources/transaction_validation.move index 3a4d480401703..bb5daa6328d26 100644 --- a/aptos-move/framework/aptos-framework/sources/transaction_validation.move +++ b/aptos-move/framework/aptos-framework/sources/transaction_validation.move @@ -1,22 +1,40 @@ module aptos_framework::transaction_validation { - use std::bcs; use std::error; use std::features; + use std::option; + use std::option::Option; use std::signer; use std::vector; use aptos_framework::account; use aptos_framework::aptos_account; + use aptos_framework::account_abstraction; use aptos_framework::aptos_coin::AptosCoin; use aptos_framework::chain_id; use aptos_framework::coin; + use aptos_framework::create_signer; + use aptos_framework::permissioned_signer; use aptos_framework::system_addresses; use aptos_framework::timestamp; use aptos_framework::transaction_fee; - use aptos_framework::governed_gas_pool; + use aptos_framework::nonce_validation; friend aptos_framework::genesis; + // We will advertise to the community that max expiration time for orderless txns is 60 seconds. + // Adding a 5 second slack here as the client's time and the blockchain's time may drift. + const MAX_EXPIRATION_TIME_SECONDS_FOR_ORDERLESS_TXNS: u64 = 65; + + // We need to ensure that a transaction can't be replayed. + // There are two ways to prevent replay attacks: + // 1. Use a nonce. Orderless transactions use this. + // 2. Use a sequence number. Regular transactions use this. + // A replay protector of a transaction signifies which of the above methods is used. + enum ReplayProtector { + Nonce(u64), + SequenceNumber(u64), + } + /// This holds information that will be picked up by the VM to call the /// correct chain-specific prologue and epilogue functions struct TransactionValidation has key { @@ -29,6 +47,8 @@ module aptos_framework::transaction_validation { user_epilogue_name: vector, } + struct GasPermission has copy, drop, store {} + /// MSB is used to indicate a gas payer tx const MAX_U64: u128 = 18446744073709551615; @@ -48,6 +68,30 @@ module aptos_framework::transaction_validation { const PROLOGUE_ESEQUENCE_NUMBER_TOO_BIG: u64 = 1008; const PROLOGUE_ESECONDARY_KEYS_ADDRESSES_COUNT_MISMATCH: u64 = 1009; const PROLOGUE_EFEE_PAYER_NOT_ENABLED: u64 = 1010; + const PROLOGUE_PERMISSIONED_GAS_LIMIT_INSUFFICIENT: u64 = 1011; + const PROLOGUE_ENONCE_ALREADY_USED: u64 = 1012; + const PROLOGUE_ETRANSACTION_EXPIRATION_TOO_FAR_IN_FUTURE: u64 = 1013; + + /// Permission management + /// + /// Master signer grant permissioned signer ability to consume a given amount of gas in octas. + public fun grant_gas_permission( + master: &signer, + permissioned: &signer, + gas_amount: u64 + ) { + permissioned_signer::authorize_increase( + master, + permissioned, + (gas_amount as u256), + GasPermission {} + ) + } + + /// Removing permissions from permissioned signer. + public fun revoke_gas_permission(permissioned: &signer) { + permissioned_signer::revoke_permission(permissioned, GasPermission {}) + } /// Only called during genesis to initialize system resources for this module. public(friend) fun initialize( @@ -71,37 +115,109 @@ module aptos_framework::transaction_validation { }); } + // TODO: can be removed after features have been rolled out. 
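// --- Illustrative sketch, not part of this patch ---
// The grant/revoke entry points added above let a master signer cap how much gas (in
// octas) a permissioned signer may pay for; the prologue later checks the remaining
// capacity with permissioned_signer::check_permission_capacity_above before charging.
// A minimal usage sketch is below; the module address 0xcafe and the function name
// cap_and_release are hypothetical, only grant_gas_permission and revoke_gas_permission
// are taken from this change.
module 0xcafe::gas_permission_example {
    use aptos_framework::transaction_validation;

    /// Allow `delegate` to spend at most `max_gas_octas` on gas, then revoke the grant.
    public fun cap_and_release(master: &signer, delegate: &signer, max_gas_octas: u64) {
        transaction_validation::grant_gas_permission(master, delegate, max_gas_octas);
        // ... delegate submits transactions whose gas is charged against this budget ...
        transaction_validation::revoke_gas_permission(delegate);
    }
}
// --- end sketch ---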
+ inline fun allow_missing_txn_authentication_key(transaction_sender: address): bool { + // aa verifies authentication itself + features::is_derivable_account_abstraction_enabled() + || (features::is_account_abstraction_enabled() && account_abstraction::using_dispatchable_authenticator(transaction_sender)) + } + fun prologue_common( - sender: signer, - gas_payer: address, - txn_sequence_number: u64, - txn_authentication_key: vector, + sender: &signer, + gas_payer: &signer, + replay_protector: ReplayProtector, + txn_authentication_key: Option>, txn_gas_price: u64, txn_max_gas_units: u64, txn_expiration_time: u64, chain_id: u8, + is_simulation: bool, ) { + let sender_address = signer::address_of(sender); + let gas_payer_address = signer::address_of(gas_payer); assert!( timestamp::now_seconds() < txn_expiration_time, error::invalid_argument(PROLOGUE_ETRANSACTION_EXPIRED), ); assert!(chain_id::get() == chain_id, error::invalid_argument(PROLOGUE_EBAD_CHAIN_ID)); - let transaction_sender = signer::address_of(&sender); + // TODO[Orderless]: Here, we are maintaining the same order of validation steps as before orderless txns were introduced. + // Ideally, do the replay protection check in the end after the authentication key check and gas payment checks. + + // Check if the authentication key is valid + if (!skip_auth_key_check(is_simulation, &txn_authentication_key)) { + if (option::is_some(&txn_authentication_key)) { + assert!( + txn_authentication_key == option::some(account::get_authentication_key(sender_address)), + error::invalid_argument(PROLOGUE_EINVALID_ACCOUNT_AUTH_KEY), + ); + } else { + assert!( + allow_missing_txn_authentication_key(sender_address), + error::invalid_argument(PROLOGUE_EINVALID_ACCOUNT_AUTH_KEY) + ); + }; + }; + + // Check for replay protection + match (replay_protector) { + SequenceNumber(txn_sequence_number) => { + check_for_replay_protection_regular_txn( + sender_address, + gas_payer_address, + txn_sequence_number, + ); + }, + Nonce(nonce) => { + check_for_replay_protection_orderless_txn( + sender_address, + nonce, + txn_expiration_time, + ); + } + }; + + // Check if the gas payer has enough balance to pay for the transaction + let max_transaction_fee = txn_gas_price * txn_max_gas_units; + if (!skip_gas_payment( + is_simulation, + gas_payer_address + )) { + assert!( + permissioned_signer::check_permission_capacity_above( + gas_payer, + (max_transaction_fee as u256), + GasPermission {} + ), + error::permission_denied(PROLOGUE_PERMISSIONED_GAS_LIMIT_INSUFFICIENT) + ); + if (features::operations_default_to_fa_apt_store_enabled()) { + assert!( + aptos_account::is_fungible_balance_at_least(gas_payer_address, max_transaction_fee), + error::invalid_argument(PROLOGUE_ECANT_PAY_GAS_DEPOSIT) + ); + } else { + assert!( + coin::is_balance_at_least(gas_payer_address, max_transaction_fee), + error::invalid_argument(PROLOGUE_ECANT_PAY_GAS_DEPOSIT) + ); + } + }; + } + fun check_for_replay_protection_regular_txn( + sender_address: address, + gas_payer_address: address, + txn_sequence_number: u64, + ) { if ( - transaction_sender == gas_payer - || account::exists_at(transaction_sender) + sender_address == gas_payer_address + || account::exists_at(sender_address) || !features::sponsored_automatic_account_creation_enabled() || txn_sequence_number > 0 ) { - assert!(account::exists_at(transaction_sender), error::invalid_argument(PROLOGUE_EACCOUNT_DOES_NOT_EXIST)); - assert!( - txn_authentication_key == account::get_authentication_key(transaction_sender), - 
error::invalid_argument(PROLOGUE_EINVALID_ACCOUNT_AUTH_KEY), - ); - - let account_sequence_number = account::get_sequence_number(transaction_sender); + assert!(account::exists_at(sender_address), error::invalid_argument(PROLOGUE_EACCOUNT_DOES_NOT_EXIST)); + let account_sequence_number = account::get_sequence_number(sender_address); assert!( txn_sequence_number < (1u64 << 63), error::out_of_range(PROLOGUE_ESEQUENCE_NUMBER_TOO_BIG) @@ -123,26 +239,20 @@ module aptos_framework::transaction_validation { txn_sequence_number == 0, error::invalid_argument(PROLOGUE_ESEQUENCE_NUMBER_TOO_NEW) ); - - assert!( - txn_authentication_key == bcs::to_bytes(&transaction_sender), - error::invalid_argument(PROLOGUE_EINVALID_ACCOUNT_AUTH_KEY), - ); }; + } - let max_transaction_fee = txn_gas_price * txn_max_gas_units; - - if (features::operations_default_to_fa_apt_store_enabled()) { - assert!( - aptos_account::is_fungible_balance_at_least(gas_payer, max_transaction_fee), - error::invalid_argument(PROLOGUE_ECANT_PAY_GAS_DEPOSIT) - ); - } else { - assert!( - coin::is_balance_at_least(gas_payer, max_transaction_fee), - error::invalid_argument(PROLOGUE_ECANT_PAY_GAS_DEPOSIT) - ); - } + fun check_for_replay_protection_orderless_txn( + sender: address, + nonce: u64, + txn_expiration_time: u64, + ) { + // prologue_common already checks that the current_time > txn_expiration_time + assert!( + txn_expiration_time <= timestamp::now_seconds() + MAX_EXPIRATION_TIME_SECONDS_FOR_ORDERLESS_TXNS, + error::invalid_argument(PROLOGUE_ETRANSACTION_EXPIRATION_TOO_FAR_IN_FUTURE), + ); + assert!(nonce_validation::check_and_insert_nonce(sender, nonce, txn_expiration_time), error::invalid_argument(PROLOGUE_ENONCE_ALREADY_USED)); } fun script_prologue( @@ -155,16 +265,44 @@ module aptos_framework::transaction_validation { chain_id: u8, _script_hash: vector, ) { - let gas_payer = signer::address_of(&sender); + // prologue_common with is_simulation set to false behaves identically to the original script_prologue function. prologue_common( - sender, - gas_payer, - txn_sequence_number, - txn_public_key, + &sender, + &sender, + ReplayProtector::SequenceNumber(txn_sequence_number), + option::some(txn_public_key), txn_gas_price, txn_max_gas_units, txn_expiration_time, - chain_id + chain_id, + false, + ) + } + + // This function extends the script_prologue by adding a parameter to indicate simulation mode. + // Once the transaction_simulation_enhancement feature is enabled, the Aptos VM will invoke this function instead. + // Eventually, this function will be consolidated with the original function once the feature is fully enabled. + fun script_prologue_extended( + sender: signer, + txn_sequence_number: u64, + txn_public_key: vector, + txn_gas_price: u64, + txn_max_gas_units: u64, + txn_expiration_time: u64, + chain_id: u8, + _script_hash: vector, + is_simulation: bool, + ) { + prologue_common( + &sender, + &sender, + ReplayProtector::SequenceNumber(txn_sequence_number), + option::some(txn_public_key), + txn_gas_price, + txn_max_gas_units, + txn_expiration_time, + chain_id, + is_simulation, ) } @@ -179,23 +317,63 @@ module aptos_framework::transaction_validation { txn_expiration_time: u64, chain_id: u8, ) { - let sender_addr = signer::address_of(&sender); + // prologue_common and multi_agent_common_prologue with is_simulation set to false behaves identically to the + // original multi_agent_script_prologue function. 
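// --- Illustrative sketch, not part of this patch ---
// check_for_replay_protection_orderless_txn above enforces a bounded validity window for
// nonce-protected (orderless) transactions: the expiration must lie in the future but no
// more than 65 seconds ahead, and (sender, nonce) must pass
// nonce_validation::check_and_insert_nonce. The sketch below mirrors only the window
// check; the module address and helper name are hypothetical.
module 0xcafe::orderless_window_example {
    use aptos_framework::timestamp;

    const MAX_EXPIRATION_TIME_SECONDS_FOR_ORDERLESS_TXNS: u64 = 65;

    /// Returns true only if `txn_expiration_time` falls inside the allowed window
    /// (now, now + 65s] used for orderless transactions.
    public fun expiration_in_window(txn_expiration_time: u64): bool {
        let now = timestamp::now_seconds();
        now < txn_expiration_time
            && txn_expiration_time <= now + MAX_EXPIRATION_TIME_SECONDS_FOR_ORDERLESS_TXNS
    }
}
// --- end sketch ---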
prologue_common( - sender, - sender_addr, - txn_sequence_number, - txn_sender_public_key, + &sender, + &sender, + ReplayProtector::SequenceNumber(txn_sequence_number), + option::some(txn_sender_public_key), txn_gas_price, txn_max_gas_units, txn_expiration_time, chain_id, + false, + ); + multi_agent_common_prologue( + secondary_signer_addresses, + vector::map(secondary_signer_public_key_hashes, |x| option::some(x)), + false ); - multi_agent_common_prologue(secondary_signer_addresses, secondary_signer_public_key_hashes); } - fun multi_agent_common_prologue( + // This function extends the multi_agent_script_prologue by adding a parameter to indicate simulation mode. + // Once the transaction_simulation_enhancement feature is enabled, the Aptos VM will invoke this function instead. + // Eventually, this function will be consolidated with the original function once the feature is fully enabled. + fun multi_agent_script_prologue_extended( + sender: signer, + txn_sequence_number: u64, + txn_sender_public_key: vector, secondary_signer_addresses: vector
, secondary_signer_public_key_hashes: vector>, + txn_gas_price: u64, + txn_max_gas_units: u64, + txn_expiration_time: u64, + chain_id: u8, + is_simulation: bool, + ) { + prologue_common( + &sender, + &sender, + ReplayProtector::SequenceNumber(txn_sequence_number), + option::some(txn_sender_public_key), + txn_gas_price, + txn_max_gas_units, + txn_expiration_time, + chain_id, + is_simulation, + ); + multi_agent_common_prologue( + secondary_signer_addresses, + vector::map(secondary_signer_public_key_hashes, |x| option::some(x)), + is_simulation + ); + } + + fun multi_agent_common_prologue( + secondary_signer_addresses: vector
, + secondary_signer_public_key_hashes: vector>>, + is_simulation: bool, ) { let num_secondary_signers = vector::length(&secondary_signer_addresses); assert!( @@ -205,23 +383,49 @@ module aptos_framework::transaction_validation { let i = 0; while ({ - spec { - invariant i <= num_secondary_signers; - invariant forall j in 0..i: - account::exists_at(secondary_signer_addresses[j]) - && secondary_signer_public_key_hashes[j] - == account::get_authentication_key(secondary_signer_addresses[j]); - }; + // spec { + // invariant i <= num_secondary_signers; + // invariant forall j in 0..i: + // account::exists_at(secondary_signer_addresses[j]); + // invariant forall j in 0..i: + // secondary_signer_public_key_hashes[j] == account::get_authentication_key(secondary_signer_addresses[j]) || + // (features::spec_simulation_enhancement_enabled() && is_simulation && vector::is_empty(secondary_signer_public_key_hashes[j])); + // account::account_resource_exists_at(secondary_signer_addresses[j]) + // && secondary_signer_public_key_hashes[j] + // == account::get_authentication_key(secondary_signer_addresses[j]) + // || features::account_abstraction_enabled() && account_abstraction::using_native_authenticator( + // secondary_signer_addresses[j] + // ) && option::spec_some(secondary_signer_public_key_hashes[j]) == account_abstraction::native_authenticator( + // account::exists_at(secondary_signer_addresses[j]) + // && secondary_signer_public_key_hashes[j] + // == account::spec_get_authentication_key(secondary_signer_addresses[j]) + // || features::spec_account_abstraction_enabled() && account_abstraction::using_native_authenticator( + // secondary_signer_addresses[j] + // ) && option::spec_some( + // secondary_signer_public_key_hashes[j] + // ) == account_abstraction::spec_native_authenticator( + // secondary_signer_addresses[j] + // ); + // }; (i < num_secondary_signers) }) { let secondary_address = *vector::borrow(&secondary_signer_addresses, i); assert!(account::exists_at(secondary_address), error::invalid_argument(PROLOGUE_EACCOUNT_DOES_NOT_EXIST)); - let signer_public_key_hash = *vector::borrow(&secondary_signer_public_key_hashes, i); - assert!( - signer_public_key_hash == account::get_authentication_key(secondary_address), - error::invalid_argument(PROLOGUE_EINVALID_ACCOUNT_AUTH_KEY), - ); + if (!skip_auth_key_check(is_simulation, &signer_public_key_hash)) { + if (option::is_some(&signer_public_key_hash)) { + assert!( + signer_public_key_hash == option::some(account::get_authentication_key(secondary_address)), + error::invalid_argument(PROLOGUE_EINVALID_ACCOUNT_AUTH_KEY) + ); + } else { + assert!( + allow_missing_txn_authentication_key(secondary_address), + error::invalid_argument(PROLOGUE_EINVALID_ACCOUNT_AUTH_KEY) + ) + }; + }; + i = i + 1; } } @@ -240,23 +444,72 @@ module aptos_framework::transaction_validation { chain_id: u8, ) { assert!(features::fee_payer_enabled(), error::invalid_state(PROLOGUE_EFEE_PAYER_NOT_ENABLED)); + // prologue_common and multi_agent_common_prologue with is_simulation set to false behaves identically to the + // original fee_payer_script_prologue function. 
prologue_common( - sender, - fee_payer_address, - txn_sequence_number, - txn_sender_public_key, + &sender, + &create_signer::create_signer(fee_payer_address), + ReplayProtector::SequenceNumber(txn_sequence_number), + option::some(txn_sender_public_key), txn_gas_price, txn_max_gas_units, txn_expiration_time, chain_id, + false, + ); + multi_agent_common_prologue( + secondary_signer_addresses, + vector::map(secondary_signer_public_key_hashes, |x| option::some(x)), + false ); - multi_agent_common_prologue(secondary_signer_addresses, secondary_signer_public_key_hashes); assert!( fee_payer_public_key_hash == account::get_authentication_key(fee_payer_address), error::invalid_argument(PROLOGUE_EINVALID_ACCOUNT_AUTH_KEY), ); } + // This function extends the fee_payer_script_prologue by adding a parameter to indicate simulation mode. + // Once the transaction_simulation_enhancement feature is enabled, the Aptos VM will invoke this function instead. + // Eventually, this function will be consolidated with the original function once the feature is fully enabled. + fun fee_payer_script_prologue_extended( + sender: signer, + txn_sequence_number: u64, + txn_sender_public_key: vector, + secondary_signer_addresses: vector
, + secondary_signer_public_key_hashes: vector>, + fee_payer_address: address, + fee_payer_public_key_hash: vector, + txn_gas_price: u64, + txn_max_gas_units: u64, + txn_expiration_time: u64, + chain_id: u8, + is_simulation: bool, + ) { + assert!(features::fee_payer_enabled(), error::invalid_state(PROLOGUE_EFEE_PAYER_NOT_ENABLED)); + prologue_common( + &sender, + &create_signer::create_signer(fee_payer_address), + ReplayProtector::SequenceNumber(txn_sequence_number), + option::some(txn_sender_public_key), + txn_gas_price, + txn_max_gas_units, + txn_expiration_time, + chain_id, + is_simulation, + ); + multi_agent_common_prologue( + secondary_signer_addresses, + vector::map(secondary_signer_public_key_hashes, |x| option::some(x)), + is_simulation + ); + if (!skip_auth_key_check(is_simulation, &option::some(fee_payer_public_key_hash))) { + assert!( + fee_payer_public_key_hash == account::get_authentication_key(fee_payer_address), + error::invalid_argument(PROLOGUE_EINVALID_ACCOUNT_AUTH_KEY), + ) + } + } + /// Epilogue function is run after a transaction is successfully executed. /// Called by the Adapter fun epilogue( @@ -264,10 +517,40 @@ module aptos_framework::transaction_validation { storage_fee_refunded: u64, txn_gas_price: u64, txn_max_gas_units: u64, - gas_units_remaining: u64 + gas_units_remaining: u64, ) { let addr = signer::address_of(&account); - epilogue_gas_payer(account, addr, storage_fee_refunded, txn_gas_price, txn_max_gas_units, gas_units_remaining); + epilogue_gas_payer( + account, + addr, + storage_fee_refunded, + txn_gas_price, + txn_max_gas_units, + gas_units_remaining + ); + } + + // This function extends the epilogue by adding a parameter to indicate simulation mode. + // Once the transaction_simulation_enhancement feature is enabled, the Aptos VM will invoke this function instead. + // Eventually, this function will be consolidated with the original function once the feature is fully enabled. + fun epilogue_extended( + account: signer, + storage_fee_refunded: u64, + txn_gas_price: u64, + txn_max_gas_units: u64, + gas_units_remaining: u64, + is_simulation: bool, + ) { + let addr = signer::address_of(&account); + epilogue_gas_payer_extended( + account, + addr, + storage_fee_refunded, + txn_gas_price, + txn_max_gas_units, + gas_units_remaining, + is_simulation + ); } /// Epilogue function with explicit gas payer specified, is run after a transaction is successfully executed. @@ -279,6 +562,31 @@ module aptos_framework::transaction_validation { txn_gas_price: u64, txn_max_gas_units: u64, gas_units_remaining: u64 + ) { + // epilogue_gas_payer_extended with is_simulation set to false behaves identically to the original + // epilogue_gas_payer function. + epilogue_gas_payer_extended( + account, + gas_payer, + storage_fee_refunded, + txn_gas_price, + txn_max_gas_units, + gas_units_remaining, + false, + ); + } + + // This function extends the epilogue_gas_payer by adding a parameter to indicate simulation mode. + // Once the transaction_simulation_enhancement feature is enabled, the Aptos VM will invoke this function instead. + // Eventually, this function will be consolidated with the original function once the feature is fully enabled. 
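// --- Illustrative sketch, not part of this patch ---
// The extended epilogue below settles the gas bill as: fee = txn_gas_price * gas_used
// (the real code first bounds this product by MAX_U64 via u128); only the part of the fee
// exceeding the storage refund is burned, and any excess refund is minted back to the gas
// payer. A worked sketch of that arithmetic follows; module address and function name are
// hypothetical.
module 0xcafe::fee_settlement_example {
    /// Returns (amount_to_burn, amount_to_refund) for a completed transaction,
    /// assuming gas is actually paid (i.e. no simulation skip).
    public fun settle(txn_gas_price: u64, gas_used: u64, storage_fee_refunded: u64): (u64, u64) {
        let transaction_fee_amount = txn_gas_price * gas_used;
        if (transaction_fee_amount > storage_fee_refunded) {
            (transaction_fee_amount - storage_fee_refunded, 0)
        } else {
            (0, storage_fee_refunded - transaction_fee_amount)
        }
    }
}
// --- end sketch ---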
+ fun epilogue_gas_payer_extended( + account: signer, + gas_payer: address, + storage_fee_refunded: u64, + txn_gas_price: u64, + txn_max_gas_units: u64, + gas_units_remaining: u64, + is_simulation: bool, ) { assert!(txn_max_gas_units >= gas_units_remaining, error::invalid_argument(EOUT_OF_GAS)); let gas_used = txn_max_gas_units - gas_units_remaining; @@ -291,49 +599,262 @@ module aptos_framework::transaction_validation { // it's important to maintain the error code consistent with vm // to do failed transaction cleanup. - if (features::operations_default_to_fa_apt_store_enabled()) { - assert!( - aptos_account::is_fungible_balance_at_least(gas_payer, transaction_fee_amount), - error::out_of_range(PROLOGUE_ECANT_PAY_GAS_DEPOSIT), - ); - } else { - assert!( - coin::is_balance_at_least(gas_payer, transaction_fee_amount), - error::out_of_range(PROLOGUE_ECANT_PAY_GAS_DEPOSIT), - ); - }; - - let amount_to_burn = if (features::collect_and_distribute_gas_fees()) { - // TODO(gas): We might want to distinguish the refundable part of the charge and burn it or track - // it separately, so that we don't increase the total supply by refunding. - - // If transaction fees are redistributed to validators, collect them here for - // later redistribution. - transaction_fee::collect_fee(gas_payer, transaction_fee_amount); - 0 - } else { - // Otherwise, just burn the fee. - // TODO: this branch should be removed completely when transaction fee collection - // is tested and is fully proven to work well. - transaction_fee_amount - }; - - if (amount_to_burn > storage_fee_refunded) { - let burn_amount = amount_to_burn - storage_fee_refunded; - if (features::governed_gas_pool_enabled()) { - governed_gas_pool::deposit_gas_fee_v2(gas_payer, burn_amount); + if (!skip_gas_payment(is_simulation, gas_payer)) { + if (features::operations_default_to_fa_apt_store_enabled()) { + assert!( + aptos_account::is_fungible_balance_at_least(gas_payer, transaction_fee_amount), + error::out_of_range(PROLOGUE_ECANT_PAY_GAS_DEPOSIT), + ); } else { + assert!( + coin::is_balance_at_least(gas_payer, transaction_fee_amount), + error::out_of_range(PROLOGUE_ECANT_PAY_GAS_DEPOSIT), + ); + }; + + if (transaction_fee_amount > storage_fee_refunded) { + let burn_amount = transaction_fee_amount - storage_fee_refunded; transaction_fee::burn_fee(gas_payer, burn_amount); - } - } else if (amount_to_burn < storage_fee_refunded) { - let mint_amount = storage_fee_refunded - amount_to_burn; - if (!features::governed_gas_pool_enabled()) { + } else if (transaction_fee_amount < storage_fee_refunded) { + let mint_amount = storage_fee_refunded - transaction_fee_amount; transaction_fee::mint_and_refund(gas_payer, mint_amount); - } + }; }; // Increment sequence number let addr = signer::address_of(&account); account::increment_sequence_number(addr); } + + inline fun skip_auth_key_check(is_simulation: bool, auth_key: &Option>): bool { + is_simulation && (option::is_none(auth_key) || vector::is_empty(option::borrow(auth_key))) + } + + inline fun skip_gas_payment(is_simulation: bool, gas_payer: address): bool { + is_simulation && gas_payer == @0x0 + } + + /////////////////////////////////////////////////////////// + /// new set of functions + /////////////////////////////////////////////////////////// + + fun unified_prologue( + sender: signer, + // None means no need to check, i.e. either AA (where it is already checked) or simulation + txn_sender_public_key: Option>, + txn_sequence_number: u64, + secondary_signer_addresses: vector
, + secondary_signer_public_key_hashes: vector>>, + txn_gas_price: u64, + txn_max_gas_units: u64, + txn_expiration_time: u64, + chain_id: u8, + is_simulation: bool, + ) { + unified_prologue_v2( + sender, + txn_sender_public_key, + ReplayProtector::SequenceNumber(txn_sequence_number), + secondary_signer_addresses, + secondary_signer_public_key_hashes, + txn_gas_price, + txn_max_gas_units, + txn_expiration_time, + chain_id, + is_simulation, + + ) + } + + /// If there is no fee_payer, fee_payer = sender + fun unified_prologue_fee_payer( + sender: signer, + fee_payer: signer, + // None means no need to check, i.e. either AA (where it is already checked) or simulation + txn_sender_public_key: Option>, + // None means no need to check, i.e. either AA (where it is already checked) or simulation + fee_payer_public_key_hash: Option>, + txn_sequence_number: u64, + secondary_signer_addresses: vector
, + secondary_signer_public_key_hashes: vector>>, + txn_gas_price: u64, + txn_max_gas_units: u64, + txn_expiration_time: u64, + chain_id: u8, + is_simulation: bool, + ) { + unified_prologue_fee_payer_v2( + sender, + fee_payer, + txn_sender_public_key, + fee_payer_public_key_hash, + ReplayProtector::SequenceNumber(txn_sequence_number), + secondary_signer_addresses, + secondary_signer_public_key_hashes, + txn_gas_price, + txn_max_gas_units, + txn_expiration_time, + chain_id, + is_simulation, + ) + } + + fun unified_epilogue( + account: signer, + gas_payer: signer, + storage_fee_refunded: u64, + txn_gas_price: u64, + txn_max_gas_units: u64, + gas_units_remaining: u64, + is_simulation: bool, + ) { + unified_epilogue_v2( + account, + gas_payer, + storage_fee_refunded, + txn_gas_price, + txn_max_gas_units, + gas_units_remaining, + is_simulation, + false, + ) + } + + + /////////////////////////////////////////////////////////// + /// new set of functions to support txn payload v2 format and orderless transactions + /////////////////////////////////////////////////////////// + + fun unified_prologue_v2( + sender: signer, + txn_sender_public_key: Option>, + replay_protector: ReplayProtector, + secondary_signer_addresses: vector
, + secondary_signer_public_key_hashes: vector>>, + txn_gas_price: u64, + txn_max_gas_units: u64, + txn_expiration_time: u64, + chain_id: u8, + is_simulation: bool, + ) { + prologue_common( + &sender, + &sender, + replay_protector, + txn_sender_public_key, + txn_gas_price, + txn_max_gas_units, + txn_expiration_time, + chain_id, + is_simulation, + ); + multi_agent_common_prologue(secondary_signer_addresses, secondary_signer_public_key_hashes, is_simulation); + } + + /// If there is no fee_payer, fee_payer = sender + fun unified_prologue_fee_payer_v2( + sender: signer, + fee_payer: signer, + txn_sender_public_key: Option>, + fee_payer_public_key_hash: Option>, + replay_protector: ReplayProtector, + secondary_signer_addresses: vector
, + secondary_signer_public_key_hashes: vector>>, + txn_gas_price: u64, + txn_max_gas_units: u64, + txn_expiration_time: u64, + chain_id: u8, + is_simulation: bool, + ) { + prologue_common( + &sender, + &fee_payer, + replay_protector, + txn_sender_public_key, + txn_gas_price, + txn_max_gas_units, + txn_expiration_time, + chain_id, + is_simulation, + ); + multi_agent_common_prologue(secondary_signer_addresses, secondary_signer_public_key_hashes, is_simulation); + if (!skip_auth_key_check(is_simulation, &fee_payer_public_key_hash)) { + let fee_payer_address = signer::address_of(&fee_payer); + if (option::is_some(&fee_payer_public_key_hash)) { + assert!( + fee_payer_public_key_hash == option::some(account::get_authentication_key(fee_payer_address)), + error::invalid_argument(PROLOGUE_EINVALID_ACCOUNT_AUTH_KEY) + ); + } else { + assert!( + allow_missing_txn_authentication_key(fee_payer_address), + error::invalid_argument(PROLOGUE_EINVALID_ACCOUNT_AUTH_KEY) + ) + }; + } + } + + fun unified_epilogue_v2( + account: signer, + gas_payer: signer, + storage_fee_refunded: u64, + txn_gas_price: u64, + txn_max_gas_units: u64, + gas_units_remaining: u64, + is_simulation: bool, + is_orderless_txn: bool, + ) { + assert!(txn_max_gas_units >= gas_units_remaining, error::invalid_argument(EOUT_OF_GAS)); + let gas_used = txn_max_gas_units - gas_units_remaining; + + assert!( + (txn_gas_price as u128) * (gas_used as u128) <= MAX_U64, + error::out_of_range(EOUT_OF_GAS) + ); + let transaction_fee_amount = txn_gas_price * gas_used; + + let gas_payer_address = signer::address_of(&gas_payer); + // it's important to maintain the error code consistent with vm + // to do failed transaction cleanup. + if (!skip_gas_payment( + is_simulation, + gas_payer_address + )) { + if (features::operations_default_to_fa_apt_store_enabled()) { + assert!( + aptos_account::is_fungible_balance_at_least(gas_payer_address, transaction_fee_amount), + error::out_of_range(PROLOGUE_ECANT_PAY_GAS_DEPOSIT), + ); + } else { + assert!( + coin::is_balance_at_least(gas_payer_address, transaction_fee_amount), + error::out_of_range(PROLOGUE_ECANT_PAY_GAS_DEPOSIT), + ); + }; + + if (transaction_fee_amount > storage_fee_refunded) { + let burn_amount = transaction_fee_amount - storage_fee_refunded; + transaction_fee::burn_fee(gas_payer_address, burn_amount); + permissioned_signer::check_permission_consume( + &gas_payer, + (burn_amount as u256), + GasPermission {} + ); + } else if (transaction_fee_amount < storage_fee_refunded) { + let mint_amount = storage_fee_refunded - transaction_fee_amount; + transaction_fee::mint_and_refund(gas_payer_address, mint_amount); + permissioned_signer::increase_limit( + &gas_payer, + (mint_amount as u256), + GasPermission {} + ); + }; + }; + + if (!is_orderless_txn) { + // Increment sequence number + let addr = signer::address_of(&account); + account::increment_sequence_number(addr); + } + } } diff --git a/aptos-move/framework/aptos-framework/sources/transaction_validation.spec.move b/aptos-move/framework/aptos-framework/sources/transaction_validation.spec.move index ac6168d176ebb..23bedb1640642 100644 --- a/aptos-move/framework/aptos-framework/sources/transaction_validation.spec.move +++ b/aptos-move/framework/aptos-framework/sources/transaction_validation.spec.move @@ -28,14 +28,26 @@ spec aptos_framework::transaction_validation { pragma aborts_if_is_strict; } + spec grant_gas_permission( + master: &signer, + permissioned: &signer, + gas_amount: u64 + ) { + pragma aborts_if_is_partial; + } + + spec 
revoke_gas_permission(permissioned: &signer) { + pragma aborts_if_is_partial; + } + /// Ensure caller is `aptos_framework`. /// Aborts if TransactionValidation already exists. spec initialize( - aptos_framework: &signer, - script_prologue_name: vector, - module_prologue_name: vector, - multi_agent_prologue_name: vector, - user_epilogue_name: vector, + aptos_framework: &signer, + script_prologue_name: vector, + module_prologue_name: vector, + multi_agent_prologue_name: vector, + user_epilogue_name: vector, ) { use std::signer; let addr = signer::address_of(aptos_framework); @@ -48,15 +60,12 @@ spec aptos_framework::transaction_validation { /// Create a schema to reuse some code. /// Give some constraints that may abort according to the conditions. spec schema PrologueCommonAbortsIf { - use std::bcs; use aptos_framework::timestamp::{CurrentTimeMicroseconds}; use aptos_framework::chain_id::{ChainId}; - use aptos_framework::account::{Account}; - use aptos_framework::coin::{CoinStore}; - sender: signer; - gas_payer: address; - txn_sequence_number: u64; - txn_authentication_key: vector; + sender: &signer; + gas_payer: &signer; + replay_protector: ReplayProtector; + txn_authentication_key: Option>; txn_gas_price: u64; txn_max_gas_units: u64; txn_expiration_time: u64; @@ -68,71 +77,79 @@ spec aptos_framework::transaction_validation { aborts_if !exists(@aptos_framework); aborts_if !(chain_id::get() == chain_id); let transaction_sender = signer::address_of(sender); - - aborts_if ( - !features::spec_is_enabled(features::SPONSORED_AUTOMATIC_ACCOUNT_CREATION) - || account::exists_at(transaction_sender) - || transaction_sender == gas_payer - || txn_sequence_number > 0 - ) && ( - !(txn_sequence_number >= global(transaction_sender).sequence_number) - || !(txn_authentication_key == global(transaction_sender).authentication_key) - || !account::exists_at(transaction_sender) - || !(txn_sequence_number == global(transaction_sender).sequence_number) - ); - - aborts_if features::spec_is_enabled(features::SPONSORED_AUTOMATIC_ACCOUNT_CREATION) - && transaction_sender != gas_payer - && txn_sequence_number == 0 - && !account::exists_at(transaction_sender) - && txn_authentication_key != bcs::to_bytes(transaction_sender); - - aborts_if !(txn_sequence_number < (1u64 << 63)); - - let max_transaction_fee = txn_gas_price * txn_max_gas_units; - aborts_if max_transaction_fee > MAX_U64; - aborts_if !exists>(gas_payer); - // property 1: The sender of a transaction should have sufficient coin balance to pay the transaction fee. 
- /// [high-level-req-1] - aborts_if !(global>(gas_payer).coin.value >= max_transaction_fee); + let gas_payer_addr = signer::address_of(gas_payer); } spec prologue_common( - sender: signer, - gas_payer: address, - txn_sequence_number: u64, - txn_authentication_key: vector, - txn_gas_price: u64, - txn_max_gas_units: u64, - txn_expiration_time: u64, - chain_id: u8, + sender: &signer, + gas_payer: &signer, + replay_protector: ReplayProtector, + txn_authentication_key: Option>, + txn_gas_price: u64, + txn_max_gas_units: u64, + txn_expiration_time: u64, + chain_id: u8, + is_simulation: bool, ) { // TODO(fa_migration) pragma verify = false; include PrologueCommonAbortsIf; } - spec script_prologue( - sender: signer, - txn_sequence_number: u64, - txn_public_key: vector, - txn_gas_price: u64, - txn_max_gas_units: u64, - txn_expiration_time: u64, - chain_id: u8, - _script_hash: vector, + spec check_for_replay_protection_orderless_txn( + sender: address, + nonce: u64, + txn_expiration_time: u64, + ) { + pragma verify = false; + } + + spec check_for_replay_protection_regular_txn( + sender_address: address, + gas_payer_address: address, + txn_sequence_number: u64, + ) { + pragma verify = false; + } + + spec script_prologue_extended( + sender: signer, + txn_sequence_number: u64, + txn_public_key: vector, + txn_gas_price: u64, + txn_max_gas_units: u64, + txn_expiration_time: u64, + chain_id: u8, + _script_hash: vector, + is_simulation: bool, ) { // TODO(fa_migration) pragma verify = false; include PrologueCommonAbortsIf { - gas_payer: signer::address_of(sender), - txn_authentication_key: txn_public_key + gas_payer: sender, + txn_authentication_key: option::spec_some(txn_public_key), + replay_protector: ReplayProtector::SequenceNumber(txn_sequence_number), }; } + spec script_prologue( + sender: signer, + txn_sequence_number: u64, + txn_public_key: vector, + txn_gas_price: u64, + txn_max_gas_units: u64, + txn_expiration_time: u64, + chain_id: u8, + _script_hash: vector, + ) { + // TODO: temporary mockup + pragma verify = false; + } + spec schema MultiAgentPrologueCommonAbortsIf { secondary_signer_addresses: vector
; - secondary_signer_public_key_hashes: vector>; + secondary_signer_public_key_hashes: vector>>; + is_simulation: bool; // Vectors to be `zipped with` should be of equal length. let num_secondary_signers = len(secondary_signer_addresses); @@ -142,129 +159,299 @@ spec aptos_framework::transaction_validation { // property 2: All secondary signer addresses are verified to be authentic through a validation process. /// [high-level-req-2] aborts_if exists i in 0..num_secondary_signers: - !account::exists_at(secondary_signer_addresses[i]) - || secondary_signer_public_key_hashes[i] != - account::get_authentication_key(secondary_signer_addresses[i]); - + !account::spec_exists_at(secondary_signer_addresses[i]); + aborts_if exists i in 0..num_secondary_signers: + !can_skip(features::spec_simulation_enhancement_enabled(), is_simulation, secondary_signer_public_key_hashes[i]) && + option::spec_is_some(secondary_signer_public_key_hashes[i]) && option::spec_borrow( + secondary_signer_public_key_hashes[i] + ) != + account::spec_get_authentication_key(secondary_signer_addresses[i]); // By the end, all secondary signers account should exist and public key hash should match. ensures forall i in 0..num_secondary_signers: - account::exists_at(secondary_signer_addresses[i]) - && secondary_signer_public_key_hashes[i] == - account::get_authentication_key(secondary_signer_addresses[i]); + account::spec_exists_at(secondary_signer_addresses[i]); + ensures forall i in 0..num_secondary_signers: + option::spec_is_none(secondary_signer_public_key_hashes[i]) || option::spec_borrow( + secondary_signer_public_key_hashes[i] + ) == + account::spec_get_authentication_key(secondary_signer_addresses[i]) + || can_skip(features::spec_simulation_enhancement_enabled(), is_simulation, secondary_signer_public_key_hashes[i]); + } + + spec fun can_skip(feature_flag: bool, is_simulation: bool, auth_key: Option>): bool { + features::spec_simulation_enhancement_enabled() && is_simulation && option::spec_is_none(auth_key) } spec multi_agent_common_prologue( - secondary_signer_addresses: vector
, - secondary_signer_public_key_hashes: vector>, + secondary_signer_addresses: vector
, + secondary_signer_public_key_hashes: vector>>, + is_simulation: bool, ) { - include MultiAgentPrologueCommonAbortsIf { - secondary_signer_addresses, - secondary_signer_public_key_hashes, - }; + pragma aborts_if_is_partial; + // include MultiAgentPrologueCommonAbortsIf { + // secondary_signer_addresses, + // secondary_signer_public_key_hashes, + // is_simulation, + // }; } /// Aborts if length of public key hashed vector /// not equal the number of singers. - spec multi_agent_script_prologue ( - sender: signer, - txn_sequence_number: u64, - txn_sender_public_key: vector, - secondary_signer_addresses: vector
, - secondary_signer_public_key_hashes: vector>, - txn_gas_price: u64, - txn_max_gas_units: u64, - txn_expiration_time: u64, - chain_id: u8, + spec multi_agent_script_prologue_extended( + sender: signer, + txn_sequence_number: u64, + txn_sender_public_key: vector, + secondary_signer_addresses: vector
, + secondary_signer_public_key_hashes: vector>, + txn_gas_price: u64, + txn_max_gas_units: u64, + txn_expiration_time: u64, + chain_id: u8, + is_simulation: bool, ) { pragma verify_duration_estimate = 120; - let gas_payer = signer::address_of(sender); + let gas_payer = sender; // TODO(fa_migration) pragma verify = false; - include PrologueCommonAbortsIf { - gas_payer, - txn_sequence_number, - txn_authentication_key: txn_sender_public_key, - }; - include MultiAgentPrologueCommonAbortsIf { - secondary_signer_addresses, - secondary_signer_public_key_hashes, - }; + // include PrologueCommonAbortsIf { + // gas_payer, + // txn_sequence_number, + // txn_authentication_key: txn_sender_public_key, + // }; + // include MultiAgentPrologueCommonAbortsIf { + // secondary_signer_addresses, + // vector::map(secondary_signer_public_key_hashes, |x| option::spec_some(x)), + // is_simulation, + // }; } - spec fee_payer_script_prologue( - sender: signer, - txn_sequence_number: u64, - txn_sender_public_key: vector, - secondary_signer_addresses: vector
, - secondary_signer_public_key_hashes: vector>, - fee_payer_address: address, - fee_payer_public_key_hash: vector, - txn_gas_price: u64, - txn_max_gas_units: u64, - txn_expiration_time: u64, - chain_id: u8, + spec multi_agent_script_prologue( + sender: signer, + txn_sequence_number: u64, + txn_sender_public_key: vector, + secondary_signer_addresses: vector
, + secondary_signer_public_key_hashes: vector>, + txn_gas_price: u64, + txn_max_gas_units: u64, + txn_expiration_time: u64, + chain_id: u8, ) { + // TODO: temporary mockup + pragma verify = false; + } + + spec fee_payer_script_prologue_extended( + sender: signer, + txn_sequence_number: u64, + txn_sender_public_key: vector, + secondary_signer_addresses: vector
, + secondary_signer_public_key_hashes: vector>, + fee_payer_address: address, + fee_payer_public_key_hash: vector, + txn_gas_price: u64, + txn_max_gas_units: u64, + txn_expiration_time: u64, + chain_id: u8, + is_simulation: bool, + ) { + pragma aborts_if_is_partial; pragma verify_duration_estimate = 120; aborts_if !features::spec_is_enabled(features::FEE_PAYER_ENABLED); - let gas_payer = fee_payer_address; + let gas_payer = create_signer::create_signer(fee_payer_address); include PrologueCommonAbortsIf { gas_payer, - txn_sequence_number, - txn_authentication_key: txn_sender_public_key, - }; - include MultiAgentPrologueCommonAbortsIf { - secondary_signer_addresses, - secondary_signer_public_key_hashes, + replay_protector: ReplayProtector::SequenceNumber(txn_sequence_number), + txn_authentication_key: option::spec_some(txn_sender_public_key), }; - - aborts_if !account::exists_at(gas_payer); - aborts_if !(fee_payer_public_key_hash == account::get_authentication_key(gas_payer)); + // include MultiAgentPrologueCommonAbortsIf { + // secondary_signer_addresses, + // secondary_signer_public_key_hashes, + // is_simulation, + // }; + + aborts_if !account::spec_exists_at(fee_payer_address); + aborts_if !(fee_payer_public_key_hash == account::spec_get_authentication_key(fee_payer_address)); aborts_if !features::spec_fee_payer_enabled(); } + spec fee_payer_script_prologue( + sender: signer, + txn_sequence_number: u64, + txn_sender_public_key: vector, + secondary_signer_addresses: vector
, + secondary_signer_public_key_hashes: vector>, + fee_payer_address: address, + fee_payer_public_key_hash: vector, + txn_gas_price: u64, + txn_max_gas_units: u64, + txn_expiration_time: u64, + chain_id: u8, + ) { + // TODO: temporary mockup + pragma verify = false; + } + /// Abort according to the conditions. /// `AptosCoinCapabilities` and `CoinInfo` should exists. /// Skip transaction_fee::burn_fee verification. - spec epilogue( - account: signer, - storage_fee_refunded: u64, - txn_gas_price: u64, - txn_max_gas_units: u64, - gas_units_remaining: u64 + spec epilogue_extended( + account: signer, + storage_fee_refunded: u64, + txn_gas_price: u64, + txn_max_gas_units: u64, + gas_units_remaining: u64, + is_simulation: bool, ) { // TODO(fa_migration) pragma verify = false; include EpilogueGasPayerAbortsIf { gas_payer: signer::address_of(account) }; } + spec epilogue( + account: signer, + storage_fee_refunded: u64, + txn_gas_price: u64, + txn_max_gas_units: u64, + gas_units_remaining: u64, + ) { + // TODO: temporary mockup + pragma verify = false; + } + /// Abort according to the conditions. /// `AptosCoinCapabilities` and `CoinInfo` should exist. /// Skip transaction_fee::burn_fee verification. - spec epilogue_gas_payer( - account: signer, - gas_payer: address, - storage_fee_refunded: u64, - txn_gas_price: u64, - txn_max_gas_units: u64, - gas_units_remaining: u64 + spec epilogue_gas_payer_extended( + account: signer, + gas_payer: address, + storage_fee_refunded: u64, + txn_gas_price: u64, + txn_max_gas_units: u64, + gas_units_remaining: u64, + is_simulation: bool, ) { // TODO(fa_migration) pragma verify = false; include EpilogueGasPayerAbortsIf; } + spec epilogue_gas_payer( + account: signer, + gas_payer: address, + storage_fee_refunded: u64, + txn_gas_price: u64, + txn_max_gas_units: u64, + gas_units_remaining: u64, + ) { + // TODO: temporary mockup + pragma verify = false; + } + + spec unified_prologue( + sender: signer, + txn_sender_public_key: Option>, + txn_sequence_number: u64, + secondary_signer_addresses: vector
, + secondary_signer_public_key_hashes: vector>>, + txn_gas_price: u64, + txn_max_gas_units: u64, + txn_expiration_time: u64, + chain_id: u8, + is_simulation: bool, + ) { + // TODO: temporary mockup + pragma verify = false; + } + + spec unified_prologue_fee_payer( + sender: signer, + fee_payer: signer, + txn_sender_public_key: Option>, + fee_payer_public_key_hash: Option>, + txn_sequence_number: u64, + secondary_signer_addresses: vector
, + secondary_signer_public_key_hashes: vector>>, + txn_gas_price: u64, + txn_max_gas_units: u64, + txn_expiration_time: u64, + chain_id: u8, + is_simulation: bool, + ) { + // TODO: temporary mockup + pragma verify = false; + } + + spec unified_epilogue( + account: signer, + gas_payer: signer, + storage_fee_refunded: u64, + txn_gas_price: u64, + txn_max_gas_units: u64, + gas_units_remaining: u64, + is_simulation: bool, + ) { + // TODO: temporary mockup + pragma verify = false; + } + + spec unified_prologue_v2( + sender: signer, + txn_sender_public_key: Option>, + replay_protector: ReplayProtector, + secondary_signer_addresses: vector
, + secondary_signer_public_key_hashes: vector>>, + txn_gas_price: u64, + txn_max_gas_units: u64, + txn_expiration_time: u64, + chain_id: u8, + is_simulation: bool, + ) { + // TODO: temporary mockup + pragma verify = false; + } + + spec unified_prologue_fee_payer_v2( + sender: signer, + fee_payer: signer, + txn_sender_public_key: Option>, + fee_payer_public_key_hash: Option>, + replay_protector: ReplayProtector, + secondary_signer_addresses: vector
, + secondary_signer_public_key_hashes: vector>>, + txn_gas_price: u64, + txn_max_gas_units: u64, + txn_expiration_time: u64, + chain_id: u8, + is_simulation: bool, + ) { + // TODO: temporary mockup + pragma verify = false; + } + + spec unified_epilogue_v2( + account: signer, + gas_payer: signer, + storage_fee_refunded: u64, + txn_gas_price: u64, + txn_max_gas_units: u64, + gas_units_remaining: u64, + is_simulation: bool, + is_orderless_txn: bool, + ) { + // TODO: temporary mockup + pragma verify = false; + } + + spec schema EpilogueGasPayerAbortsIf { use std::option; use aptos_std::type_info; use aptos_framework::account::{Account}; - use aptos_framework::aggregator; use aptos_framework::aptos_coin::{AptosCoin}; use aptos_framework::coin; use aptos_framework::coin::{CoinStore, CoinInfo}; use aptos_framework::optional_aggregator; - use aptos_framework::transaction_fee::{AptosCoinCapabilities, AptosCoinMintCapability, CollectedFeesPerBlock}; - use aptos_framework::governed_gas_pool::{GovernedGasPool, governed_gas_pool_address}; + use aptos_framework::transaction_fee::{AptosCoinCapabilities, AptosCoinMintCapability}; account: signer; gas_payer: address; @@ -273,10 +460,6 @@ spec aptos_framework::transaction_validation { txn_max_gas_units: u64; gas_units_remaining: u64; - // Precondition: Governed Gas Pool must be initialized - requires exists(@aptos_framework); - requires exists>(governed_gas_pool_address()); - // Check transaction invariants. aborts_if !(txn_max_gas_units >= gas_units_remaining); let gas_used = txn_max_gas_units - gas_units_remaining; @@ -288,43 +471,19 @@ spec aptos_framework::transaction_validation { // TODO(fa_migration) // let pre_balance = global>(gas_payer).coin.value; // let post balance = global>(gas_payer).coin.value; - - // TODO(governed_gas_pool) - let pre_governed_gas_pool_balance = global>(governed_gas_pool_address()).coin.value; - let post governed_gas_pool_balance = global>(governed_gas_pool_address()).coin.value; - let pre_account = global(addr); let post account = global(addr); aborts_if !exists>(gas_payer); aborts_if !exists(addr); aborts_if !(global(addr).sequence_number < MAX_U64); - // TODO(fa_migration) // aborts_if pre_balance < transaction_fee_amount; // ensures balance == pre_balance - transaction_fee_amount + storage_fee_refunded; - // TODO(governd_gas_pool) - ensures governed_gas_pool_balance == pre_governed_gas_pool_balance + transaction_fee_amount; ensures account.sequence_number == pre_account.sequence_number + 1; - // Check fee collection. - let governed_gas_pool_enabled = features::spec_is_enabled(features::GOVERNED_GAS_POOL); - let collect_fee_enabled = features::spec_is_enabled(features::COLLECT_AND_DISTRIBUTE_GAS_FEES); - let collected_fees = global(@aptos_framework).amount; - let aggr = collected_fees.value; - let aggr_val = aggregator::spec_aggregator_get_val(aggr); - let aggr_lim = aggregator::spec_get_limit(aggr); - - /// [high-level-req-3] - aborts_if collect_fee_enabled && !exists(@aptos_framework); - aborts_if collect_fee_enabled && transaction_fee_amount > 0 && aggr_val + transaction_fee_amount > aggr_lim; - // Check burning. // (Check the total supply aggregator when enabled.) 
- let amount_to_burn = if (collect_fee_enabled) { - 0 - } else { - transaction_fee_amount - storage_fee_refunded - }; + let amount_to_burn = transaction_fee_amount - storage_fee_refunded; let apt_addr = type_info::type_of().account_address; let maybe_apt_supply = global>(apt_addr).supply; let total_supply_enabled = option::spec_is_some(maybe_apt_supply); @@ -340,11 +499,7 @@ spec aptos_framework::transaction_validation { ensures total_supply_enabled ==> apt_supply_value - amount_to_burn == post_apt_supply_value; // Check minting. - let amount_to_mint = if (collect_fee_enabled) { - storage_fee_refunded - } else { - storage_fee_refunded - transaction_fee_amount - }; + let amount_to_mint = storage_fee_refunded - transaction_fee_amount; let total_supply = coin::supply; let post post_total_supply = coin::supply; diff --git a/aptos-move/framework/aptos-framework/sources/util.move b/aptos-move/framework/aptos-framework/sources/util.move index 332afa299c784..1ae94623447f5 100644 --- a/aptos-move/framework/aptos-framework/sources/util.move +++ b/aptos-move/framework/aptos-framework/sources/util.move @@ -8,9 +8,20 @@ module aptos_framework::util { /// Note that this function does not put any constraint on `T`. If code uses this function to /// deserialized a linear value, its their responsibility that the data they deserialize is /// owned. + /// + /// Function would abort if T has signer in it. public(friend) native fun from_bytes(bytes: vector): T; public fun address_from_bytes(bytes: vector): address { from_bytes(bytes) } + + #[test_only] + use std::bcs; + + #[test(s1 = @0x123)] + #[expected_failure(abort_code = 0x10001, location = Self)] + fun test_signer_roundtrip(s1: signer) { + from_bytes(bcs::to_bytes(&s1)); + } } diff --git a/aptos-move/framework/aptos-framework/sources/vesting.move b/aptos-move/framework/aptos-framework/sources/vesting.move index 527b4726ffb3b..2e53859786f74 100644 --- a/aptos-move/framework/aptos-framework/sources/vesting.move +++ b/aptos-move/framework/aptos-framework/sources/vesting.move @@ -53,6 +53,7 @@ module aptos_framework::vesting { use aptos_framework::staking_contract; use aptos_framework::system_addresses; use aptos_framework::timestamp; + use aptos_framework::permissioned_signer; friend aptos_framework::genesis; @@ -90,6 +91,8 @@ module aptos_framework::vesting { const EPERMISSION_DENIED: u64 = 15; /// Zero items were provided to a *_many function. const EVEC_EMPTY_FOR_MANY_FUNCTION: u64 = 16; + /// Current permissioned signer cannot perform vesting operations. + const ENO_VESTING_PERMISSION: u64 = 17; /// Maximum number of shareholders a vesting pool can support. const MAXIMUM_SHAREHOLDERS: u64 = 30; @@ -328,6 +331,22 @@ module aptos_framework::vesting { amount: u64, } + /// Permissions to mutate the vesting config for a given account. + struct VestPermission has copy, drop, store {} + + /// Permissions + inline fun check_vest_permission(s: &signer) { + assert!( + permissioned_signer::check_permission_exists(s, VestPermission {}), + error::permission_denied(ENO_VESTING_PERMISSION), + ); + } + + /// Grant permission to perform vesting operations on behalf of the master signer. + public fun grant_permission(master: &signer, permissioned_signer: &signer) { + permissioned_signer::authorize_unlimited(master, permissioned_signer, VestPermission {}) + } + #[view] /// Return the address of the underlying stake pool (separate resource account) of the vesting contract. 
/// @@ -535,6 +554,7 @@ module aptos_framework::vesting { // Optional seed used when creating the staking contract account. contract_creation_seed: vector, ): address acquires AdminStore { + check_vest_permission(admin); assert!( !system_addresses::is_reserved_address(withdrawal_address), error::invalid_argument(EINVALID_WITHDRAWAL_ADDRESS), @@ -596,19 +616,20 @@ module aptos_framework::vesting { commission_percentage, }, ); + } else { + emit_event( + &mut admin_store.create_events, + CreateVestingContractEvent { + operator, + voter, + withdrawal_address, + grant_amount, + vesting_contract_address: contract_address, + staking_pool_address: pool_address, + commission_percentage, + }, + ); }; - emit_event( - &mut admin_store.create_events, - CreateVestingContractEvent { - operator, - voter, - withdrawal_address, - grant_amount, - vesting_contract_address: contract_address, - staking_pool_address: pool_address, - commission_percentage, - }, - ); move_to(&contract_signer, VestingContract { state: VESTING_POOL_ACTIVE, @@ -705,17 +726,18 @@ module aptos_framework::vesting { amount: vested_amount, }, ); + } else { + emit_event( + &mut vesting_contract.vest_events, + VestEvent { + admin: vesting_contract.admin, + vesting_contract_address: contract_address, + staking_pool_address: vesting_contract.staking.pool_address, + period_vested: next_period_to_vest, + amount: vested_amount, + }, + ); }; - emit_event( - &mut vesting_contract.vest_events, - VestEvent { - admin: vesting_contract.admin, - vesting_contract_address: contract_address, - staking_pool_address: vesting_contract.staking.pool_address, - period_vested: next_period_to_vest, - amount: vested_amount, - }, - ); } /// Call `vest` for many vesting contracts. @@ -769,15 +791,16 @@ module aptos_framework::vesting { amount: total_distribution_amount, }, ); + } else { + emit_event( + &mut vesting_contract.distribute_events, + DistributeEvent { + admin: vesting_contract.admin, + vesting_contract_address: contract_address, + amount: total_distribution_amount, + }, + ); }; - emit_event( - &mut vesting_contract.distribute_events, - DistributeEvent { - admin: vesting_contract.admin, - vesting_contract_address: contract_address, - amount: total_distribution_amount, - }, - ); } /// Call `distribute` for many vesting contracts. @@ -816,14 +839,15 @@ module aptos_framework::vesting { vesting_contract_address: contract_address, }, ); + } else { + emit_event( + &mut vesting_contract.terminate_events, + TerminateEvent { + admin: vesting_contract.admin, + vesting_contract_address: contract_address, + }, + ); }; - emit_event( - &mut vesting_contract.terminate_events, - TerminateEvent { - admin: vesting_contract.admin, - vesting_contract_address: contract_address, - }, - ); } /// Withdraw all funds to the preset vesting contract's withdrawal address. 
This can only be called if the contract @@ -853,15 +877,16 @@ module aptos_framework::vesting { amount, }, ); + } else { + emit_event( + &mut vesting_contract.admin_withdraw_events, + AdminWithdrawEvent { + admin: vesting_contract.admin, + vesting_contract_address: contract_address, + amount, + }, + ); }; - emit_event( - &mut vesting_contract.admin_withdraw_events, - AdminWithdrawEvent { - admin: vesting_contract.admin, - vesting_contract_address: contract_address, - amount, - }, - ); } public entry fun update_operator( @@ -889,18 +914,19 @@ module aptos_framework::vesting { commission_percentage, }, ); + } else { + emit_event( + &mut vesting_contract.update_operator_events, + UpdateOperatorEvent { + admin: vesting_contract.admin, + vesting_contract_address: contract_address, + staking_pool_address: vesting_contract.staking.pool_address, + old_operator, + new_operator, + commission_percentage, + }, + ); }; - emit_event( - &mut vesting_contract.update_operator_events, - UpdateOperatorEvent { - admin: vesting_contract.admin, - vesting_contract_address: contract_address, - staking_pool_address: vesting_contract.staking.pool_address, - old_operator, - new_operator, - commission_percentage, - }, - ); } public entry fun update_operator_with_same_commission( @@ -949,17 +975,18 @@ module aptos_framework::vesting { new_voter, }, ); - }; - emit_event( - &mut vesting_contract.update_voter_events, - UpdateVoterEvent { - admin: vesting_contract.admin, - vesting_contract_address: contract_address, - staking_pool_address: vesting_contract.staking.pool_address, - old_voter, - new_voter, - }, - ); + } else { + emit_event( + &mut vesting_contract.update_voter_events, + UpdateVoterEvent { + admin: vesting_contract.admin, + vesting_contract_address: contract_address, + staking_pool_address: vesting_contract.staking.pool_address, + old_voter, + new_voter, + }, + ); + } } public entry fun reset_lockup( @@ -980,16 +1007,17 @@ module aptos_framework::vesting { new_lockup_expiration_secs: stake::get_lockup_secs(vesting_contract.staking.pool_address), }, ); + } else { + emit_event( + &mut vesting_contract.reset_lockup_events, + ResetLockupEvent { + admin: vesting_contract.admin, + vesting_contract_address: contract_address, + staking_pool_address: vesting_contract.staking.pool_address, + new_lockup_expiration_secs: stake::get_lockup_secs(vesting_contract.staking.pool_address), + }, + ); }; - emit_event( - &mut vesting_contract.reset_lockup_events, - ResetLockupEvent { - admin: vesting_contract.admin, - vesting_contract_address: contract_address, - staking_pool_address: vesting_contract.staking.pool_address, - new_lockup_expiration_secs: stake::get_lockup_secs(vesting_contract.staking.pool_address), - }, - ); } public entry fun set_beneficiary( @@ -1024,17 +1052,18 @@ module aptos_framework::vesting { new_beneficiary, }, ); + } else { + emit_event( + &mut vesting_contract.set_beneficiary_events, + SetBeneficiaryEvent { + admin: vesting_contract.admin, + vesting_contract_address: contract_address, + shareholder, + old_beneficiary, + new_beneficiary, + }, + ); }; - emit_event( - &mut vesting_contract.set_beneficiary_events, - SetBeneficiaryEvent { - admin: vesting_contract.admin, - vesting_contract_address: contract_address, - shareholder, - old_beneficiary, - new_beneficiary, - }, - ); } /// Remove the beneficiary for the given shareholder. 
All distributions will sent directly to the shareholder @@ -1044,6 +1073,7 @@ module aptos_framework::vesting { contract_address: address, shareholder: address, ) acquires VestingAccountManagement, VestingContract { + check_vest_permission(account); let vesting_contract = borrow_global_mut(contract_address); let addr = signer::address_of(account); assert!( @@ -1064,7 +1094,7 @@ module aptos_framework::vesting { role: String, role_holder: address, ) acquires VestingAccountManagement, VestingContract { - let vesting_contract = borrow_global_mut(contract_address); + let vesting_contract = borrow_global(contract_address); verify_admin(admin, vesting_contract); if (!exists(contract_address)) { @@ -1108,7 +1138,7 @@ module aptos_framework::vesting { /// This doesn't give the admin total power as the admin would still need to follow the rules set by /// staking_contract and stake modules. public fun get_vesting_account_signer(admin: &signer, contract_address: address): signer acquires VestingContract { - let vesting_contract = borrow_global_mut(contract_address); + let vesting_contract = borrow_global(contract_address); verify_admin(admin, vesting_contract); get_vesting_account_signer_internal(vesting_contract) } @@ -1123,6 +1153,7 @@ module aptos_framework::vesting { admin: &signer, contract_creation_seed: vector, ): (signer, SignerCapability) acquires AdminStore { + check_vest_permission(admin); let admin_store = borrow_global_mut(signer::address_of(admin)); let seed = bcs::to_bytes(&signer::address_of(admin)); vector::append(&mut seed, bcs::to_bytes(&admin_store.nonce)); @@ -1142,6 +1173,7 @@ module aptos_framework::vesting { } fun verify_admin(admin: &signer, vesting_contract: &VestingContract) { + check_vest_permission(admin); assert!(signer::address_of(admin) == vesting_contract.admin, error::unauthenticated(ENOT_ADMIN)); } @@ -1203,12 +1235,6 @@ module aptos_framework::vesting { #[test_only] const VALIDATOR_STATUS_INACTIVE: u64 = 4; - #[test_only] - const MODULE_EVENT: u64 = 26; - - #[test_only] - const OPERATOR_BENEFICIARY_CHANGE: u64 = 39; - #[test_only] public fun setup(aptos_framework: &signer, accounts: &vector
) { use aptos_framework::aptos_account::create_account; @@ -1226,12 +1252,11 @@ module aptos_framework::vesting { vector::for_each_ref(accounts, |addr| { let addr: address = *addr; - if (!account::exists_at(addr)) { - create_account(addr); - }; + create_account(addr); }); - std::features::change_feature_flags_for_testing(aptos_framework, vector[MODULE_EVENT, OPERATOR_BENEFICIARY_CHANGE], vector[]); + // In the test environment, the periodical_reward_rate_decrease feature is initially turned off. + std::features::change_feature_flags_for_testing(aptos_framework, vector[], vector[std::features::get_periodical_reward_rate_decrease_feature()]); } #[test_only] @@ -1452,40 +1477,6 @@ module aptos_framework::vesting { setup_vesting_contract(admin, &vector[@1, @2], &vector[1], admin_address, 0); } - #[test(aptos_framework = @0x1, admin = @0x123)] - #[expected_failure(abort_code = 0x60001, location = aptos_framework::aptos_account)] - public entry fun test_create_vesting_contract_with_invalid_withdrawal_address_should_fail( - aptos_framework: &signer, - admin: &signer, - ) acquires AdminStore { - let admin_address = signer::address_of(admin); - setup(aptos_framework, &vector[admin_address]); - setup_vesting_contract(admin, &vector[@1, @2], &vector[1], @5, 0); - } - - #[test(aptos_framework = @0x1, admin = @0x123)] - #[expected_failure(abort_code = 0x60001, location = aptos_framework::aptos_account)] - public entry fun test_create_vesting_contract_with_missing_withdrawal_account_should_fail( - aptos_framework: &signer, - admin: &signer, - ) acquires AdminStore { - let admin_address = signer::address_of(admin); - setup(aptos_framework, &vector[admin_address]); - setup_vesting_contract(admin, &vector[@1, @2], &vector[1], @11, 0); - } - - #[test(aptos_framework = @0x1, admin = @0x123)] - #[expected_failure(abort_code = 0x60002, location = aptos_framework::aptos_account)] - public entry fun test_create_vesting_contract_with_unregistered_withdrawal_account_should_fail( - aptos_framework: &signer, - admin: &signer, - ) acquires AdminStore { - let admin_address = signer::address_of(admin); - setup(aptos_framework, &vector[admin_address]); - create_account_for_test(@11); - setup_vesting_contract(admin, &vector[@1, @2], &vector[1], @11, 0); - } - #[test(aptos_framework = @0x1)] #[expected_failure(abort_code = 0x10002, location = Self)] public entry fun test_create_empty_vesting_schedule_should_fail(aptos_framework: &signer) { @@ -2009,8 +2000,7 @@ module aptos_framework::vesting { } #[test(aptos_framework = @0x1, admin = @0x123)] - #[expected_failure(abort_code = 0x60001, location = aptos_framework::aptos_account)] - public entry fun test_set_beneficiary_with_missing_account_should_fail( + public entry fun test_set_beneficiary_with_missing_account( aptos_framework: &signer, admin: &signer, ) acquires AdminStore, VestingContract { @@ -2022,11 +2012,12 @@ module aptos_framework::vesting { } #[test(aptos_framework = @0x1, admin = @0x123)] - #[expected_failure(abort_code = 0x60002, location = aptos_framework::aptos_account)] - public entry fun test_set_beneficiary_with_unregistered_account_should_fail( + public entry fun test_set_beneficiary_with_unregistered_account( aptos_framework: &signer, admin: &signer, ) acquires AdminStore, VestingContract { + let fa_feature = std::features::get_new_accounts_default_to_fa_store_feature(); + std::features::change_feature_flags_for_testing(aptos_framework, vector[], vector[fa_feature]); let admin_address = signer::address_of(admin); setup(aptos_framework, 
&vector[admin_address]); let contract_address = setup_vesting_contract( diff --git a/aptos-move/framework/aptos-framework/sources/vesting.spec.move b/aptos-move/framework/aptos-framework/sources/vesting.spec.move index dfb445b95ab68..0247621d95339 100644 --- a/aptos-move/framework/aptos-framework/sources/vesting.spec.move +++ b/aptos-move/framework/aptos-framework/sources/vesting.spec.move @@ -105,13 +105,20 @@ spec aptos_framework::vesting { /// spec module { pragma verify = true; - pragma aborts_if_is_strict; + pragma aborts_if_is_partial; // property 2: The vesting pool should not exceed a maximum of 30 shareholders. /// [high-level-spec-2] invariant forall a: address where exists(a): global(a).grant_pool.shareholders_limit <= MAXIMUM_SHAREHOLDERS; } + spec schema AbortsIfPermissionedSigner { + use aptos_framework::permissioned_signer; + s: signer; + let perm = VestPermission {}; + aborts_if !permissioned_signer::spec_check_permission_exists(s, perm); + } + spec stake_pool_address(vesting_contract_address: address): address { aborts_if !exists(vesting_contract_address); } @@ -163,12 +170,8 @@ spec aptos_framework::vesting { spec schema TotalAccumulatedRewardsAbortsIf { vesting_contract_address: address; - // Note: commission percentage should not be under 0 or higher than 100, cause it's a percentage number - // This requirement will solve the timeout issue of total_accumulated_rewards - // However, accumulated_rewards is still timeout - requires staking_contract.commission_percentage >= 0 && staking_contract.commission_percentage <= 100; - include ActiveVestingContractAbortsIf{contract_address: vesting_contract_address}; + include ActiveVestingContractAbortsIf{contract_address: vesting_contract_address}; let vesting_contract = global(vesting_contract_address); let staker = vesting_contract_address; @@ -225,14 +228,14 @@ spec aptos_framework::vesting { } spec shareholders(vesting_contract_address: address): vector
{ - include ActiveVestingContractAbortsIf{contract_address: vesting_contract_address}; + include ActiveVestingContractAbortsIf{contract_address: vesting_contract_address}; } spec fun spec_shareholder(vesting_contract_address: address, shareholder_or_beneficiary: address): address; spec shareholder(vesting_contract_address: address, shareholder_or_beneficiary: address): address { pragma opaque; - include ActiveVestingContractAbortsIf{contract_address: vesting_contract_address}; + include ActiveVestingContractAbortsIf{contract_address: vesting_contract_address}; ensures [abstract] result == spec_shareholder(vesting_contract_address, shareholder_or_beneficiary); } @@ -294,7 +297,6 @@ spec aptos_framework::vesting { // TODO: Calls `unlock_rewards` in loop. pragma verify = false; aborts_if len(contract_addresses) == 0; - include PreconditionAbortsIf; } spec vest(contract_address: address) { @@ -307,20 +309,12 @@ spec aptos_framework::vesting { // TODO: Calls `vest` in loop. pragma verify = false; aborts_if len(contract_addresses) == 0; - include PreconditionAbortsIf; - } - - spec schema PreconditionAbortsIf { - contract_addresses: vector
; - - requires forall i in 0..len(contract_addresses): simple_map::spec_get(global(contract_addresses[i]).staking_contracts, global(contract_addresses[i]).staking.operator).commission_percentage >= 0 - && simple_map::spec_get(global(contract_addresses[i]).staking_contracts, global(contract_addresses[i]).staking.operator).commission_percentage <= 100; } spec distribute(contract_address: address) { // TODO: Can't handle abort in loop. pragma verify = false; - include ActiveVestingContractAbortsIf; + include ActiveVestingContractAbortsIf; let vesting_contract = global(contract_address); include WithdrawStakeAbortsIf { vesting_contract }; @@ -335,7 +329,7 @@ spec aptos_framework::vesting { spec terminate_vesting_contract(admin: &signer, contract_address: address) { // TODO: Calls `staking_contract::distribute` which is not verified. pragma verify = false; - include ActiveVestingContractAbortsIf; + include ActiveVestingContractAbortsIf; let vesting_contract = global(contract_address); include WithdrawStakeAbortsIf { vesting_contract }; @@ -438,8 +432,9 @@ spec aptos_framework::vesting { // TODO: set because of timeout (property proved) pragma verify_duration_estimate = 300; pragma aborts_if_is_partial; - aborts_if !account::exists_at(new_beneficiary); - aborts_if !coin::spec_is_account_registered(new_beneficiary); + aborts_if !account::spec_exists_at(new_beneficiary); + // TODO(fa_migration) + // aborts_if !coin::spec_is_account_registered(new_beneficiary); include VerifyAdminAbortsIf; let post vesting_contract = global(contract_address); ensures simple_map::spec_contains_key(vesting_contract.beneficiaries,shareholder); @@ -500,6 +495,7 @@ spec aptos_framework::vesting { } spec get_vesting_account_signer(admin: &signer, contract_address: address): signer { + pragma verify_duration_estimate = 120; include VerifyAdminAbortsIf; } @@ -528,7 +524,7 @@ spec aptos_framework::vesting { aborts_if !exists(admin_addr); aborts_if len(account::ZERO_AUTH_KEY) != 32; aborts_if admin_store.nonce + 1 > MAX_U64; - let ea = account::exists_at(resource_addr); + let ea = account::spec_exists_at(resource_addr); include if (ea) account::CreateResourceAccountAbortsIf else account::CreateAccountAbortsIf {addr: resource_addr}; let acc = global(resource_addr); @@ -543,8 +539,11 @@ spec aptos_framework::vesting { } spec verify_admin(admin: &signer, vesting_contract: &VestingContract) { + pragma verify_duration_estimate = 120; + aborts_if permissioned_signer::spec_is_permissioned_signer(admin); /// [high-level-req-9] aborts_if signer::address_of(admin) != vesting_contract.admin; + // include AbortsIfPermissionedSigner { s: admin }; } spec assert_vesting_contract_exists(contract_address: address) { @@ -553,7 +552,7 @@ spec aptos_framework::vesting { } spec assert_active_vesting_contract(contract_address: address) { - include ActiveVestingContractAbortsIf; + include ActiveVestingContractAbortsIf; } spec unlock_stake(vesting_contract: &VestingContract, amount: u64) { @@ -643,12 +642,14 @@ spec aptos_framework::vesting { spec schema VerifyAdminAbortsIf { contract_address: address; admin: signer; + + aborts_if permissioned_signer::spec_is_permissioned_signer(admin); aborts_if !exists(contract_address); let vesting_contract = global(contract_address); aborts_if signer::address_of(admin) != vesting_contract.admin; } - spec schema ActiveVestingContractAbortsIf { + spec schema ActiveVestingContractAbortsIf { contract_address: address; /// [high-level-spec-5] aborts_if !exists(contract_address); diff --git 
a/aptos-move/framework/aptos-framework/sources/voting.move b/aptos-move/framework/aptos-framework/sources/voting.move index 3bc26528ba736..8dd1b0cb2e9a5 100644 --- a/aptos-move/framework/aptos-framework/sources/voting.move +++ b/aptos-move/framework/aptos-framework/sources/voting.move @@ -34,6 +34,7 @@ module aptos_framework::voting { use aptos_framework::account; use aptos_framework::event::{Self, EventHandle}; + use aptos_framework::permissioned_signer; use aptos_framework::timestamp; use aptos_framework::transaction_context; use aptos_std::from_bcs; @@ -63,6 +64,8 @@ module aptos_framework::voting { const ESINGLE_STEP_PROPOSAL_CANNOT_HAVE_NEXT_EXECUTION_HASH: u64 = 11; /// Cannot call `is_multi_step_proposal_in_execution()` on single-step proposals. const EPROPOSAL_IS_SINGLE_STEP: u64 = 12; + /// Cannot call `is_multi_step_proposal_in_execution()` on single-step proposals. + const ENO_VOTE_PERMISSION: u64 = 13; /// ProposalStateEnum representing proposal state. const PROPOSAL_STATE_PENDING: u64 = 0; @@ -188,7 +191,23 @@ module aptos_framework::voting { num_votes: u64, } + struct VotePermission has copy, drop, store {} + + /// Permissions + inline fun check_vote_permission(s: &signer) { + assert!( + permissioned_signer::check_permission_exists(s, VotePermission {}), + error::permission_denied(ENO_VOTE_PERMISSION), + ); + } + + /// Grant permission to vote on behalf of the master signer. + public fun grant_permission(master: &signer, permissioned_signer: &signer) { + permissioned_signer::authorize_unlimited(master, permissioned_signer, VotePermission {}) + } + public fun register(account: &signer) { + check_vote_permission(account); let addr = signer::address_of(account); assert!(!exists>(addr), error::already_exists(EVOTING_FORUM_ALREADY_REGISTERED)); @@ -210,14 +229,15 @@ module aptos_framework::voting { proposal_type_info: type_info::type_of(), }, ); + } else { + event::emit_event( + &mut voting_forum.events.register_forum_events, + RegisterForumEvent { + hosting_account: addr, + proposal_type_info: type_info::type_of(), + }, + ); }; - event::emit_event( - &mut voting_forum.events.register_forum_events, - RegisterForumEvent { - hosting_account: addr, - proposal_type_info: type_info::type_of(), - }, - ); move_to(account, voting_forum); } @@ -305,7 +325,7 @@ module aptos_framework::voting { simple_map::add(&mut metadata, is_multi_step_in_execution_key, to_bytes(&false)); // If the proposal is a single-step proposal, we check if the metadata passed by the client has the IS_MULTI_STEP_PROPOSAL_IN_EXECUTION_KEY key. // If they have the key, we will remove it, because a single-step proposal that doesn't need this key. 
- } else if (simple_map::contains_key(&mut metadata, &is_multi_step_in_execution_key)) { + } else if (simple_map::contains_key(&metadata, &is_multi_step_in_execution_key)) { simple_map::remove(&mut metadata, &is_multi_step_in_execution_key); }; @@ -335,19 +355,19 @@ module aptos_framework::voting { min_vote_threshold, }, ); + } else { + event::emit_event( + &mut voting_forum.events.create_proposal_events, + CreateProposalEvent { + proposal_id, + early_resolution_vote_threshold, + execution_hash, + expiration_secs, + metadata, + min_vote_threshold, + }, + ); }; - event::emit_event( - &mut voting_forum.events.create_proposal_events, - CreateProposalEvent { - proposal_id, - early_resolution_vote_threshold, - execution_hash, - expiration_secs, - metadata, - min_vote_threshold, - }, - ); - proposal_id } @@ -399,11 +419,12 @@ module aptos_framework::voting { if (std::features::module_event_migration_enabled()) { event::emit(Vote { proposal_id, num_votes }); + } else { + event::emit_event( + &mut voting_forum.events.vote_events, + VoteEvent { proposal_id, num_votes }, + ); }; - event::emit_event( - &mut voting_forum.events.vote_events, - VoteEvent { proposal_id, num_votes }, - ); } /// Common checks on if a proposal is resolvable, regardless if the proposal is single-step or multi-step. @@ -467,16 +488,17 @@ module aptos_framework::voting { resolved_early, }, ); + } else { + event::emit_event( + &mut voting_forum.events.resolve_proposal_events, + ResolveProposal { + proposal_id, + yes_votes: proposal.yes_votes, + no_votes: proposal.no_votes, + resolved_early, + }, + ); }; - event::emit_event( - &mut voting_forum.events.resolve_proposal_events, - ResolveProposal { - proposal_id, - yes_votes: proposal.yes_votes, - no_votes: proposal.no_votes, - resolved_early, - }, - ); option::extract(&mut proposal.execution_content) } @@ -556,17 +578,17 @@ module aptos_framework::voting { resolved_early, }, ); + } else { + event::emit_event( + &mut voting_forum.events.resolve_proposal_events, + ResolveProposal { + proposal_id, + yes_votes: proposal.yes_votes, + no_votes: proposal.no_votes, + resolved_early, + }, + ); }; - event::emit_event( - &mut voting_forum.events.resolve_proposal_events, - ResolveProposal { - proposal_id, - yes_votes: proposal.yes_votes, - no_votes: proposal.no_votes, - resolved_early, - }, - ); - } #[view] diff --git a/aptos-move/framework/aptos-framework/sources/voting.spec.move b/aptos-move/framework/aptos-framework/sources/voting.spec.move index a1c05091e54b6..083acff020461 100644 --- a/aptos-move/framework/aptos-framework/sources/voting.spec.move +++ b/aptos-move/framework/aptos-framework/sources/voting.spec.move @@ -40,19 +40,27 @@ spec aptos_framework::voting { /// spec module { pragma verify = true; - pragma aborts_if_is_strict; + pragma aborts_if_is_partial; + } + + spec schema AbortsIfPermissionedSigner { + use aptos_framework::permissioned_signer; + s: signer; + let perm = VotePermission {}; + aborts_if !permissioned_signer::spec_check_permission_exists(s, perm); } spec register(account: &signer) { + // include AbortsIfPermissionedSigner { s: account }; let addr = signer::address_of(account); // Will abort if there's already a `VotingForum` under addr aborts_if exists>(addr); // Creation of 4 new event handles changes the account's `guid_creation_num` - aborts_if !exists(addr); - let register_account = global(addr); - aborts_if register_account.guid_creation_num + 4 >= account::MAX_GUID_CREATION_NUM; - aborts_if register_account.guid_creation_num + 4 > MAX_U64; + // aborts_if 
!exists(addr); + // let register_account = global(addr); + // aborts_if register_account.guid_creation_num + 4 >= account::MAX_GUID_CREATION_NUM; + // aborts_if register_account.guid_creation_num + 4 > MAX_U64; // `type_info::type_of()` may abort if the type parameter is not a struct aborts_if !type_info::spec_is_struct(); diff --git a/aptos-move/framework/aptos-framework/tests/account_abstraction_tests.move b/aptos-move/framework/aptos-framework/tests/account_abstraction_tests.move new file mode 100644 index 0000000000000..8e7a25e1ea0bd --- /dev/null +++ b/aptos-move/framework/aptos-framework/tests/account_abstraction_tests.move @@ -0,0 +1,17 @@ +#[test_only] +module aptos_framework::account_abstraction_tests { + use std::signer; + use aptos_framework::auth_data::AbstractionAuthData; + use aptos_framework::object; + + public fun invalid_authenticate( + account: signer, + _signing_data: AbstractionAuthData, + ): signer { + let addr = signer::address_of(&account); + let cref = object::create_object(addr); + object::generate_signer(&cref) + } + + public fun test_auth(account: signer, _data: AbstractionAuthData): signer { account } +} diff --git a/aptos-move/framework/aptos-framework/tests/aggregator_tests.move b/aptos-move/framework/aptos-framework/tests/aggregator_tests.move index 9350c245268d7..5a4ae5fa0c469 100644 --- a/aptos-move/framework/aptos-framework/tests/aggregator_tests.move +++ b/aptos-move/framework/aptos-framework/tests/aggregator_tests.move @@ -4,10 +4,12 @@ module aptos_framework::aggregator_tests { use aptos_framework::aggregator; use aptos_framework::aggregator_factory; + const MAX_U128: u128 = 340282366920938463463374607431768211455; + #[test(account = @aptos_framework)] fun test_can_add_and_sub_and_read(account: signer) { aggregator_factory::initialize_aggregator_factory_for_test(&account); - let aggregator = aggregator_factory::create_aggregator(&account, 1000); + let aggregator = aggregator_factory::create_aggregator_for_test(); aggregator::add(&mut aggregator, 12); assert!(aggregator::read(&aggregator) == 12, 0); @@ -30,7 +32,8 @@ module aptos_framework::aggregator_tests { #[expected_failure(abort_code = 0x020001, location = aptos_framework::aggregator)] fun test_overflow(account: signer) { aggregator_factory::initialize_aggregator_factory_for_test(&account); - let aggregator = aggregator_factory::create_aggregator(&account, 10); + let aggregator = aggregator_factory::create_aggregator_for_test(); + aggregator::add(&mut aggregator, MAX_U128 - 10); // Overflow! aggregator::add(&mut aggregator, 12); @@ -42,7 +45,7 @@ module aptos_framework::aggregator_tests { #[expected_failure(abort_code = 0x020002, location = aptos_framework::aggregator)] fun test_underflow(account: signer) { aggregator_factory::initialize_aggregator_factory_for_test(&account); - let aggregator = aggregator_factory::create_aggregator(&account, 10); + let aggregator = aggregator_factory::create_aggregator_for_test(); // Underflow! aggregator::sub(&mut aggregator, 100); diff --git a/aptos-move/framework/aptos-framework/tests/clamped_token.move b/aptos-move/framework/aptos-framework/tests/clamped_token.move new file mode 100644 index 0000000000000..d5994bb298c07 --- /dev/null +++ b/aptos-move/framework/aptos-framework/tests/clamped_token.move @@ -0,0 +1,92 @@ +#[test_only] +module 0xcafe::clamped_token { + // Create a token with max amount one can withdraw on each withdraw call. 
+ + use aptos_framework::fungible_asset::{Self, FungibleAsset, RawBalanceRef, RawSupplyRef, TransferRef}; + use aptos_framework::dispatchable_fungible_asset; + use aptos_framework::object::{ConstructorRef, Object}; + use aptos_framework::function_info; + + use std::option; + use std::option::Option; + use std::signer; + use std::string; + + struct BalanceStore has key { + balance_ref: RawBalanceRef, + supply_ref: RawSupplyRef, + } + + public fun initialize(account: &signer, constructor_ref: &ConstructorRef) { + assert!(signer::address_of(account) == @0xcafe, 1); + let balance_ref = fungible_asset::generate_raw_balance_ref(constructor_ref); + let supply_ref = fungible_asset::generate_raw_supply_ref(constructor_ref); + move_to(account, BalanceStore { balance_ref, supply_ref }); + + let balance_value = function_info::new_function_info( + account, + string::utf8(b"clamped_token"), + string::utf8(b"derived_balance"), + ); + let supply_value = function_info::new_function_info( + account, + string::utf8(b"clamped_token"), + string::utf8(b"derived_supply"), + ); + + let withdraw = function_info::new_function_info( + account, + string::utf8(b"clamped_token"), + string::utf8(b"withdraw"), + ); + + let deposit = function_info::new_function_info( + account, + string::utf8(b"clamped_token"), + string::utf8(b"deposit"), + ); + + dispatchable_fungible_asset::register_dispatch_functions( + constructor_ref, + option::some(withdraw), + option::some(deposit), + option::some(balance_value) + ); + dispatchable_fungible_asset::register_derive_supply_dispatch_function( + constructor_ref, + option::some(supply_value) + ); + } + + public fun derived_balance(store: Object): u64 acquires BalanceStore { + fungible_asset::balance_with_ref( + &borrow_global(@0xcafe).balance_ref, + store + ) + } + + public fun derived_supply(metadata: Object): Option acquires BalanceStore { + option::some(option::extract(&mut fungible_asset::supply_with_ref( + &borrow_global(@0xcafe).supply_ref, + metadata + ))) + } + + public fun withdraw( + store: Object, + amount: u64, + transfer_ref: &TransferRef, + ): FungibleAsset { + // Clamp the max amount of asset to withdraw: at most 10 can be withdrawn each call. 
+ assert!(amount <= 10, 0); + fungible_asset::withdraw_with_ref(transfer_ref, store, amount) + } + + public fun deposit( + store: Object, + fa: FungibleAsset, + transfer_ref: &TransferRef, + ) { + fungible_asset::deposit_with_ref(transfer_ref, store, fa) + } +} diff --git a/aptos-move/framework/aptos-framework/tests/clamped_token_tests.move b/aptos-move/framework/aptos-framework/tests/clamped_token_tests.move new file mode 100644 index 0000000000000..ae6daee527137 --- /dev/null +++ b/aptos-move/framework/aptos-framework/tests/clamped_token_tests.move @@ -0,0 +1,55 @@ +#[test_only] +module aptos_framework::clamped_token_tests { + use aptos_framework::fungible_asset::{Self, Metadata, TestToken}; + use aptos_framework::dispatchable_fungible_asset; + use aptos_framework::object; + use 0xcafe::clamped_token; + use std::option; + + #[test(creator = @0xcafe)] + fun test_clamped( + creator: &signer, + ) { + let (creator_ref, token_object) = fungible_asset::create_test_token(creator); + let (mint, _, _, _) = fungible_asset::init_test_metadata(&creator_ref); + let metadata = object::convert(token_object); + + let creator_store = fungible_asset::create_test_store(creator, metadata); + + clamped_token::initialize(creator, &creator_ref); + + assert!(dispatchable_fungible_asset::derived_supply(metadata) == option::some(0), 2); + // Mint + let fa = fungible_asset::mint(&mint, 100); + dispatchable_fungible_asset::deposit(creator_store, fa); + + assert!(dispatchable_fungible_asset::derived_balance(creator_store) == 100, 4); + assert!(dispatchable_fungible_asset::derived_supply(metadata) == option::some(100), 5); + + let fa = dispatchable_fungible_asset::withdraw(creator, creator_store, 5); + dispatchable_fungible_asset::deposit(creator_store, fa); + } + + #[test(creator = @0xcafe)] + #[expected_failure(abort_code = 0, location = 0xcafe::clamped_token)] + fun test_clamped_aborted( + creator: &signer, + ) { + let (creator_ref, token_object) = fungible_asset::create_test_token(creator); + let (mint, _, _, _) = fungible_asset::init_test_metadata(&creator_ref); + let metadata = object::convert(token_object); + + let creator_store = fungible_asset::create_test_store(creator, metadata); + + clamped_token::initialize(creator, &creator_ref); + + assert!(dispatchable_fungible_asset::derived_supply(metadata) == option::some(0), 2); + // Mint + let fa = fungible_asset::mint(&mint, 100); + dispatchable_fungible_asset::deposit(creator_store, fa); + + // Failed to withdraw as it exceeds the withdraw limit. + let fa = dispatchable_fungible_asset::withdraw(creator, creator_store, 20); + dispatchable_fungible_asset::deposit(creator_store, fa); + } +} diff --git a/aptos-move/framework/aptos-framework/tests/compiler-v2-doc/jwks.md b/aptos-move/framework/aptos-framework/tests/compiler-v2-doc/jwks.md new file mode 100644 index 0000000000000..7fbe90227b29b --- /dev/null +++ b/aptos-move/framework/aptos-framework/tests/compiler-v2-doc/jwks.md @@ -0,0 +1,1885 @@ + + + +# Module `0x1::jwks` + +JWK functions and structs. + +Note: An important design constraint for this module is that the JWK consensus Rust code is unable to +spawn a VM and make a Move function call. Instead, the JWK consensus Rust code will have to directly +write some of the resources in this file. As a result, the structs in this file are declared so as to +have a simple layout which is easily accessible in Rust. 
+
+- [Struct `OIDCProvider`](#0x1_jwks_OIDCProvider)
+- [Resource `SupportedOIDCProviders`](#0x1_jwks_SupportedOIDCProviders)
+- [Struct `UnsupportedJWK`](#0x1_jwks_UnsupportedJWK)
+- [Struct `RSA_JWK`](#0x1_jwks_RSA_JWK)
+- [Struct `JWK`](#0x1_jwks_JWK)
+- [Struct `ProviderJWKs`](#0x1_jwks_ProviderJWKs)
+- [Struct `AllProvidersJWKs`](#0x1_jwks_AllProvidersJWKs)
+- [Resource `ObservedJWKs`](#0x1_jwks_ObservedJWKs)
+- [Struct `ObservedJWKsUpdated`](#0x1_jwks_ObservedJWKsUpdated)
+- [Struct `Patch`](#0x1_jwks_Patch)
+- [Struct `PatchRemoveAll`](#0x1_jwks_PatchRemoveAll)
+- [Struct `PatchRemoveIssuer`](#0x1_jwks_PatchRemoveIssuer)
+- [Struct `PatchRemoveJWK`](#0x1_jwks_PatchRemoveJWK)
+- [Struct `PatchUpsertJWK`](#0x1_jwks_PatchUpsertJWK)
+- [Resource `Patches`](#0x1_jwks_Patches)
+- [Resource `PatchedJWKs`](#0x1_jwks_PatchedJWKs)
+- [Resource `FederatedJWKs`](#0x1_jwks_FederatedJWKs)
+- [Constants](#@Constants_0)
+- [Function `patch_federated_jwks`](#0x1_jwks_patch_federated_jwks)
+- [Function `update_federated_jwk_set`](#0x1_jwks_update_federated_jwk_set)
+- [Function `get_patched_jwk`](#0x1_jwks_get_patched_jwk)
+- [Function `try_get_patched_jwk`](#0x1_jwks_try_get_patched_jwk)
+- [Function `upsert_oidc_provider`](#0x1_jwks_upsert_oidc_provider)
+- [Function `upsert_oidc_provider_for_next_epoch`](#0x1_jwks_upsert_oidc_provider_for_next_epoch)
+- [Function `remove_oidc_provider`](#0x1_jwks_remove_oidc_provider)
+- [Function `remove_oidc_provider_for_next_epoch`](#0x1_jwks_remove_oidc_provider_for_next_epoch)
+- [Function `on_new_epoch`](#0x1_jwks_on_new_epoch)
+- [Function `set_patches`](#0x1_jwks_set_patches)
+- [Function `new_patch_remove_all`](#0x1_jwks_new_patch_remove_all)
+- [Function `new_patch_remove_issuer`](#0x1_jwks_new_patch_remove_issuer)
+- [Function `new_patch_remove_jwk`](#0x1_jwks_new_patch_remove_jwk)
+- [Function `new_patch_upsert_jwk`](#0x1_jwks_new_patch_upsert_jwk)
+- [Function `new_rsa_jwk`](#0x1_jwks_new_rsa_jwk)
+- [Function `new_unsupported_jwk`](#0x1_jwks_new_unsupported_jwk)
+- [Function `initialize`](#0x1_jwks_initialize)
+- [Function `remove_oidc_provider_internal`](#0x1_jwks_remove_oidc_provider_internal)
+- [Function `upsert_into_observed_jwks`](#0x1_jwks_upsert_into_observed_jwks)
+- [Function `remove_issuer_from_observed_jwks`](#0x1_jwks_remove_issuer_from_observed_jwks)
+- [Function `regenerate_patched_jwks`](#0x1_jwks_regenerate_patched_jwks)
+- [Function `try_get_jwk_by_issuer`](#0x1_jwks_try_get_jwk_by_issuer)
+- [Function `try_get_jwk_by_id`](#0x1_jwks_try_get_jwk_by_id)
+- [Function `get_jwk_id`](#0x1_jwks_get_jwk_id)
+- [Function `upsert_provider_jwks`](#0x1_jwks_upsert_provider_jwks)
+- [Function `remove_issuer`](#0x1_jwks_remove_issuer)
+- [Function `upsert_jwk`](#0x1_jwks_upsert_jwk)
+- [Function `remove_jwk`](#0x1_jwks_remove_jwk)
+- [Function `apply_patch`](#0x1_jwks_apply_patch)
+- [Specification](#@Specification_1)
+  - [Function `on_new_epoch`](#@Specification_1_on_new_epoch)
+
use 0x1::bcs;
+use 0x1::chain_status;
+use 0x1::comparator;
+use 0x1::config_buffer;
+use 0x1::copyable_any;
+use 0x1::error;
+use 0x1::event;
+use 0x1::option;
+use 0x1::reconfiguration;
+use 0x1::signer;
+use 0x1::string;
+use 0x1::system_addresses;
+use 0x1::vector;
+
+
+## Struct `OIDCProvider`
+
+An OIDC provider.
+
+`struct OIDCProvider has copy, drop, store`
+
+Fields:
+
+- `name: vector<u8>`: The utf-8 encoded issuer string. E.g., b"https://www.facebook.com".
+- `config_url: vector<u8>`: The utf-8 encoded OpenID configuration URL of the provider.
+  E.g., b"https://www.facebook.com/.well-known/openid-configuration/".
+
+## Resource `SupportedOIDCProviders`
+
+A list of OIDC providers whose JWKs should be watched by validators. Maintained by governance proposals.
+
+`struct SupportedOIDCProviders has copy, drop, store, key`
+
+Fields:
+
+- `providers: vector<jwks::OIDCProvider>`
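For illustration only (not part of this patch), a governance-executed step might add a provider to this watch list via `upsert_oidc_provider_for_next_epoch`. Its exact signature and `Option` return value are assumptions inferred from the function list above; the module address `0xcafe` and issuer URLs are placeholders.

```move
#[test_only]
module 0xcafe::watch_provider_example {
    use aptos_framework::jwks;

    /// Hypothetical sketch: `framework` is the @aptos_framework signer obtained through governance.
    /// Assumes upsert_oidc_provider_for_next_epoch(fx, name, config_url) returns the previous
    /// config URL for that issuer, if any.
    fun watch_google(framework: &signer) {
        let _previous_config_url = jwks::upsert_oidc_provider_for_next_epoch(
            framework,
            b"https://accounts.google.com",
            b"https://accounts.google.com/.well-known/openid-configuration"
        );
    }
}
```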
+
+## Struct `UnsupportedJWK`
+
+A JWK variant that represents the JWKs which were observed but not yet supported by Aptos.
+Observing UnsupportedJWKs means the providers adopted a new key type/format, and the system should be updated.
+
+`struct UnsupportedJWK has copy, drop, store`
+
+Fields:
+
+- `id: vector<u8>`
+- `payload: vector<u8>`
+
+## Struct `RSA_JWK`
+
+A JWK variant where `kty` is RSA.
+
+`struct RSA_JWK has copy, drop, store`
+
+Fields:
+
+- `kid: string::String`
+- `kty: string::String`
+- `alg: string::String`
+- `e: string::String`
+- `n: string::String`
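As a minimal sketch (not part of this patch), an RSA JWK value can be built from its string components with `new_rsa_jwk`; the argument order `(kid, alg, e, n)` is an assumption inferred from the field list above and should be checked against `jwks.move`. The module address and key material below are placeholders.

```move
#[test_only]
module 0xcafe::rsa_jwk_example {
    use std::string::utf8;
    use aptos_framework::jwks::{Self, JWK};

    // Build a JWK whose variant is RSA_JWK; `kty` is expected to be filled in as "RSA" by the module.
    fun sample_rsa_jwk(): JWK {
        jwks::new_rsa_jwk(
            utf8(b"kid-2024-01"),     // kid
            utf8(b"RS256"),           // alg
            utf8(b"AQAB"),            // e
            utf8(b"0vx7agoebGcQ...")  // n (base64url-encoded modulus, truncated placeholder)
        )
    }
}
```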
+
+## Struct `JWK`
+
+A JSON web key.
+
+`struct JWK has copy, drop, store`
+
+Fields:
+
+- `variant: copyable_any::Any`: A JWK variant packed as an `Any`. Currently the variant type is one of
+  `RSA_JWK` and `UnsupportedJWK`.
+
+## Struct `ProviderJWKs`
+
+A provider and its JWKs.
+
+`struct ProviderJWKs has copy, drop, store`
+
+Fields:
+
+- `issuer: vector<u8>`: The utf-8 encoding of the issuer string (e.g., "https://www.facebook.com").
+- `version: u64`: A version number is needed by JWK consensus to dedup the updates.
+  E.g., when the on-chain version = 5, multiple nodes can propose an update with version = 6.
+  Bumped every time the JWKs for the current issuer are updated.
+  The Rust authenticator only uses the latest version.
+- `jwks: vector<jwks::JWK>`: Vector of JWKs sorted by their unique ID (from `get_jwk_id`) in dictionary order.
+
+## Struct `AllProvidersJWKs`
+
+Multiple ProviderJWKs objects, indexed by issuer and key ID.
+
+`struct AllProvidersJWKs has copy, drop, store`
+
+Fields:
+
+- `entries: vector<jwks::ProviderJWKs>`: Vector of ProviderJWKs sorted by `ProviderJWKs::issuer` in dictionary order.
+
+## Resource `ObservedJWKs`
+
+The AllProvidersJWKs that validators observed and agreed on.
+
+`struct ObservedJWKs has copy, drop, store, key`
+
+Fields:
+
+- `jwks: jwks::AllProvidersJWKs`
+
+## Struct `ObservedJWKsUpdated`
+
+When ObservedJWKs is updated, this event is sent to resync the JWK consensus state in all validators.
+
+`#[event]`
+`struct ObservedJWKsUpdated has drop, store`
+
+Fields:
+
+- `epoch: u64`
+- `jwks: jwks::AllProvidersJWKs`
+
+## Struct `Patch`
+
+A small edit or patch that is applied to an AllProvidersJWKs to obtain PatchedJWKs.
+
+`struct Patch has copy, drop, store`
+
+Fields:
+
+- `variant: copyable_any::Any`: A Patch variant packed as an `Any`. Currently the variant type is one of
+  `PatchRemoveAll`, `PatchRemoveIssuer`, `PatchRemoveJWK`, and `PatchUpsertJWK`.
+ + + +## Struct `PatchRemoveAll` + +A Patch variant to remove all JWKs. + + +
struct PatchRemoveAll has copy, drop, store
+
+ + + +
+Fields + + +
+
+dummy_field: bool +
+
+ +
+
+ + +
+ + + +## Struct `PatchRemoveIssuer` + +A Patch variant to remove an issuer and all its JWKs. + + +
struct PatchRemoveIssuer has copy, drop, store
+
+ + + +
+Fields + + +
+
+issuer: vector<u8> +
+
+ +
+
+ + +
+ + + +## Struct `PatchRemoveJWK` + +A Patch variant to remove a specific JWK of an issuer. + + +
struct PatchRemoveJWK has copy, drop, store
+
+ + + +
+Fields + + +
+
+issuer: vector<u8> +
+
+ +
+
+jwk_id: vector<u8> +
+
+ +
+
+ + +
+ + + +## Struct `PatchUpsertJWK` + +A Patch variant to upsert a JWK for an issuer. + + +
struct PatchUpsertJWK has copy, drop, store
+
+ + + +
+Fields + + +
+
+issuer: vector<u8> +
+
+ +
+
+jwk: jwks::JWK +
+
+ +
+
+ + +
+ + + +## Resource `Patches` + +A sequence of Patch objects that are applied *one by one* to the ObservedJWKs. + +Maintained by governance proposals. + + +
struct Patches has key
+
+ + + +
+Fields + + +
+
+patches: vector<jwks::Patch> +
+
+ +
+
+ + +
+ + + +## Resource `PatchedJWKs` + +The result of applying the Patches to the ObservedJWKs. +This is what applications should consume. + + +
struct PatchedJWKs has drop, key
+
+ + + +
+Fields + + +
+
+jwks: jwks::AllProvidersJWKs +
+
+ +
+
+ + +
+ + + +## Resource `FederatedJWKs` + +JWKs for federated keyless accounts are stored in this resource. + + +
struct FederatedJWKs has drop, key
+
+ + + +
+Fields + + +
+
+jwks: jwks::AllProvidersJWKs +
+
+ +
+
+ + +
+ + + +## Constants + + + + + + +
const EFEDERATED_JWKS_TOO_LARGE: u64 = 8;
+
+ + + + + + + +
const EINSTALL_FEDERATED_JWKS_AT_APTOS_FRAMEWORK: u64 = 7;
+
+ + + + + + + +
const EINVALID_FEDERATED_JWK_SET: u64 = 9;
+
+ + + + + + + +
const EISSUER_NOT_FOUND: u64 = 5;
+
+ + + + + + + +
const EJWK_ID_NOT_FOUND: u64 = 6;
+
+ + + + + + + +
const ENATIVE_INCORRECT_VERSION: u64 = 259;
+
+ + + + + + + +
const ENATIVE_MISSING_RESOURCE_OBSERVED_JWKS: u64 = 258;
+
+ + + + + + + +
const ENATIVE_MISSING_RESOURCE_VALIDATOR_SET: u64 = 257;
+
+ + + + + + + +
const ENATIVE_MULTISIG_VERIFICATION_FAILED: u64 = 260;
+
+ + + + + + + +
const ENATIVE_NOT_ENOUGH_VOTING_POWER: u64 = 261;
+
+ + + + + + + +
const EUNEXPECTED_EPOCH: u64 = 1;
+
+ + + + + + + +
const EUNEXPECTED_VERSION: u64 = 2;
+
+ + + + + + + +
const EUNKNOWN_JWK_VARIANT: u64 = 4;
+
+ + + + + + + +
const EUNKNOWN_PATCH_VARIANT: u64 = 3;
+
+ + + + + +We limit the size of a FederatedJWKs resource installed by a dapp owner for federated keyless accounts. +Note: If too large, validators waste work reading it for invalid TXN signatures. + +
const MAX_FEDERATED_JWKS_SIZE_BYTES: u64 = 2048;
+
+ + + + + +## Function `patch_federated_jwks` + +Called by a federated keyless dapp owner to install the JWKs for the federated OIDC provider (e.g., Auth0, AWS +Cognito, etc). For type-safety, we explicitly use a struct FederatedJWKs { jwks: AllProviderJWKs } instead of +reusing PatchedJWKs { jwks: AllProviderJWKs }, which is a JWK-consensus-specific struct. + + +
public fun patch_federated_jwks(jwk_owner: &signer, patches: vector<jwks::Patch>)
+
+ + + +
+Implementation + + +
public fun patch_federated_jwks(jwk_owner: &signer, patches: vector<Patch>) acquires FederatedJWKs {
+    // Prevents accidental calls in 0x1::jwks that install federated JWKs at the Aptos framework address.
+    assert!(!system_addresses::is_aptos_framework_address(signer::address_of(jwk_owner)),
+        error::invalid_argument(EINSTALL_FEDERATED_JWKS_AT_APTOS_FRAMEWORK)
+    );
+
+    let jwk_addr = signer::address_of(jwk_owner);
+    if (!exists<FederatedJWKs>(jwk_addr)) {
+        move_to(jwk_owner, FederatedJWKs { jwks: AllProvidersJWKs { entries: vector[] } });
+    };
+
+    let fed_jwks = borrow_global_mut<FederatedJWKs>(jwk_addr);
+    vector::for_each_ref(&patches, |obj|{
+        let patch: &Patch = obj;
+        apply_patch(&mut fed_jwks.jwks, *patch);
+    });
+
+    // TODO: Can we check the size more efficiently instead of serializing it via BCS?
+    let num_bytes = vector::length(&bcs::to_bytes(fed_jwks));
+    assert!(num_bytes < MAX_FEDERATED_JWKS_SIZE_BYTES, error::invalid_argument(EFEDERATED_JWKS_TOO_LARGE));
+}
+
+ + + +
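+
+For illustration, a minimal sketch of a dapp-owner call (the signer jwk_owner, the issuer, and all key material below are placeholders, not values from this module):
+
+```move
+use std::string::utf8;
+// All values here are hypothetical; `jwk_owner` is the dapp owner's signer.
+let jwk = aptos_framework::jwks::new_rsa_jwk(
+    utf8(b"kid-placeholder"),                 // kid
+    utf8(b"RS256"),                           // alg
+    utf8(b"AQAB"),                            // e
+    utf8(b"<base64url-encoded RSA modulus>")  // n
+);
+// Reset any previously installed federated JWKs, then install the new key.
+aptos_framework::jwks::patch_federated_jwks(jwk_owner, vector[
+    aptos_framework::jwks::new_patch_remove_all(),
+    aptos_framework::jwks::new_patch_upsert_jwk(b"https://accounts.example.com", jwk)
+]);
+```
+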
+ + + +## Function `update_federated_jwk_set` + +This can be called to install or update a set of JWKs for a federated OIDC provider. This function should +be invoked to initially install a set of JWKs or to update a set of JWKs when a keypair is rotated. + +The iss parameter is the value of the iss claim on the JWTs that are to be verified by the JWK set. +kid_vec, alg_vec, e_vec, n_vec are String vectors of the JWK attributes kid, alg, e and n respectively. +See https://datatracker.ietf.org/doc/html/rfc7517#section-4 for more details about the aforementioned JWK attributes. + +For the example JWK set snapshot below containing 2 keys for Google found at https://www.googleapis.com/oauth2/v3/certs - +```json +{ +"keys": [ +{ +"alg": "RS256", +"use": "sig", +"kty": "RSA", +"n": "wNHgGSG5B5xOEQNFPW2p_6ZxZbfPoAU5VceBUuNwQWLop0ohW0vpoZLU1tAsq_S9s5iwy27rJw4EZAOGBR9oTRq1Y6Li5pDVJfmzyRNtmWCWndR-bPqhs_dkJU7MbGwcvfLsN9FSHESFrS9sfGtUX-lZfLoGux23TKdYV9EE-H-NDASxrVFUk2GWc3rL6UEMWrMnOqV9-tghybDU3fcRdNTDuXUr9qDYmhmNegYjYu4REGjqeSyIG1tuQxYpOBH-tohtcfGY-oRTS09kgsSS9Q5BRM4qqCkGP28WhlSf4ui0-norS0gKMMI1P_ZAGEsLn9p2TlYMpewvIuhjJs1thw", +"kid": "d7b939771a7800c413f90051012d975981916d71", +"e": "AQAB" +}, +{ +"kty": "RSA", +"kid": "b2620d5e7f132b52afe8875cdf3776c064249d04", +"alg": "RS256", +"n": "pi22xDdK2fz5gclIbDIGghLDYiRO56eW2GUcboeVlhbAuhuT5mlEYIevkxdPOg5n6qICePZiQSxkwcYMIZyLkZhSJ2d2M6Szx2gDtnAmee6o_tWdroKu0DjqwG8pZU693oLaIjLku3IK20lTs6-2TeH-pUYMjEqiFMhn-hb7wnvH_FuPTjgz9i0rEdw_Hf3Wk6CMypaUHi31y6twrMWq1jEbdQNl50EwH-RQmQ9bs3Wm9V9t-2-_Jzg3AT0Ny4zEDU7WXgN2DevM8_FVje4IgztNy29XUkeUctHsr-431_Iu23JIy6U4Kxn36X3RlVUKEkOMpkDD3kd81JPW4Ger_w", +"e": "AQAB", +"use": "sig" +} +] +} +``` + +We can call update_federated_jwk_set for Google's iss - "https://accounts.google.com" and for each vector +argument kid_vec, alg_vec, e_vec, n_vec, we set in index 0 the corresponding attribute in the first JWK and we set in index 1 +the corresponding attribute in the second JWK as shown below. + +```move +use std::string::utf8; +aptos_framework::jwks::update_federated_jwk_set( +jwk_owner, +b"https://accounts.google.com", +vector[utf8(b"d7b939771a7800c413f90051012d975981916d71"), utf8(b"b2620d5e7f132b52afe8875cdf3776c064249d04")], +vector[utf8(b"RS256"), utf8(b"RS256")], +vector[utf8(b"AQAB"), utf8(b"AQAB")], +vector[ +utf8(b"wNHgGSG5B5xOEQNFPW2p_6ZxZbfPoAU5VceBUuNwQWLop0ohW0vpoZLU1tAsq_S9s5iwy27rJw4EZAOGBR9oTRq1Y6Li5pDVJfmzyRNtmWCWndR-bPqhs_dkJU7MbGwcvfLsN9FSHESFrS9sfGtUX-lZfLoGux23TKdYV9EE-H-NDASxrVFUk2GWc3rL6UEMWrMnOqV9-tghybDU3fcRdNTDuXUr9qDYmhmNegYjYu4REGjqeSyIG1tuQxYpOBH-tohtcfGY-oRTS09kgsSS9Q5BRM4qqCkGP28WhlSf4ui0-norS0gKMMI1P_ZAGEsLn9p2TlYMpewvIuhjJs1thw"), +utf8(b"pi22xDdK2fz5gclIbDIGghLDYiRO56eW2GUcboeVlhbAuhuT5mlEYIevkxdPOg5n6qICePZiQSxkwcYMIZyLkZhSJ2d2M6Szx2gDtnAmee6o_tWdroKu0DjqwG8pZU693oLaIjLku3IK20lTs6-2TeH-pUYMjEqiFMhn-hb7wnvH_FuPTjgz9i0rEdw_Hf3Wk6CMypaUHi31y6twrMWq1jEbdQNl50EwH-RQmQ9bs3Wm9V9t-2-_Jzg3AT0Ny4zEDU7WXgN2DevM8_FVje4IgztNy29XUkeUctHsr-431_Iu23JIy6U4Kxn36X3RlVUKEkOMpkDD3kd81JPW4Ger_w") +] +) +``` + +See AIP-96 for more details about federated keyless - https://github.com/aptos-foundation/AIPs/blob/main/aips/aip-96.md + +NOTE: Currently only RSA keys are supported. + +
public entry fun update_federated_jwk_set(jwk_owner: &signer, iss: vector<u8>, kid_vec: vector<string::String>, alg_vec: vector<string::String>, e_vec: vector<string::String>, n_vec: vector<string::String>)
+
+ + + +
+Implementation + + +
public entry fun update_federated_jwk_set(jwk_owner: &signer, iss: vector<u8>, kid_vec: vector<String>, alg_vec: vector<String>, e_vec: vector<String>, n_vec: vector<String>) acquires FederatedJWKs {
+    assert!(!vector::is_empty(&kid_vec), error::invalid_argument(EINVALID_FEDERATED_JWK_SET));
+    let num_jwk = vector::length<String>(&kid_vec);
+    assert!(vector::length(&alg_vec) == num_jwk , error::invalid_argument(EINVALID_FEDERATED_JWK_SET));
+    assert!(vector::length(&e_vec) == num_jwk, error::invalid_argument(EINVALID_FEDERATED_JWK_SET));
+    assert!(vector::length(&n_vec) == num_jwk, error::invalid_argument(EINVALID_FEDERATED_JWK_SET));
+
+    let remove_all_patch = new_patch_remove_all();
+    let patches = vector[remove_all_patch];
+    while (!vector::is_empty(&kid_vec)) {
+        let kid = vector::pop_back(&mut kid_vec);
+        let alg = vector::pop_back(&mut alg_vec);
+        let e = vector::pop_back(&mut e_vec);
+        let n = vector::pop_back(&mut n_vec);
+        let jwk = new_rsa_jwk(kid, alg, e, n);
+        let patch = new_patch_upsert_jwk(iss, jwk);
+        vector::push_back(&mut patches, patch)
+    };
+    patch_federated_jwks(jwk_owner, patches);
+}
+
+ + + +
+ + + +## Function `get_patched_jwk` + +Get a JWK by issuer and key ID from the PatchedJWKs. +Abort if such a JWK does not exist. +More convenient to call from Rust, since it does not wrap the JWK in an Option. + + +
public fun get_patched_jwk(issuer: vector<u8>, jwk_id: vector<u8>): jwks::JWK
+
+ + + +
+Implementation + + +
public fun get_patched_jwk(issuer: vector<u8>, jwk_id: vector<u8>): JWK acquires PatchedJWKs {
+    option::extract(&mut try_get_patched_jwk(issuer, jwk_id))
+}
+
+ + + +
+ + + +## Function `try_get_patched_jwk` + +Get a JWK by issuer and key ID from the PatchedJWKs, if it exists. +More convenient to call from Move, since it does not abort. + + +
public fun try_get_patched_jwk(issuer: vector<u8>, jwk_id: vector<u8>): option::Option<jwks::JWK>
+
+ + + +
+Implementation + + +
public fun try_get_patched_jwk(issuer: vector<u8>, jwk_id: vector<u8>): Option<JWK> acquires PatchedJWKs {
+    let jwks = &borrow_global<PatchedJWKs>(@aptos_framework).jwks;
+    try_get_jwk_by_issuer(jwks, issuer, jwk_id)
+}
+
+ + + +
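+
+For illustration only (the issuer and key ID below are placeholders), an on-chain consumer could look up a key like this:
+
+```move
+use std::option;
+// Any unknown (issuer, kid) pair yields option::none(); get_patched_jwk would abort instead.
+let maybe_jwk = aptos_framework::jwks::try_get_patched_jwk(
+    b"https://accounts.google.com",
+    b"b2620d5e7f132b52afe8875cdf3776c064249d04"
+);
+if (option::is_some(&maybe_jwk)) {
+    let _jwk = option::extract(&mut maybe_jwk);
+    // ... use `_jwk` ...
+};
+```
+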
+ + + +## Function `upsert_oidc_provider` + +Deprecated by upsert_oidc_provider_for_next_epoch(). + +TODO: update all the tests that reference this function, then disable this function. + + +
public fun upsert_oidc_provider(fx: &signer, name: vector<u8>, config_url: vector<u8>): option::Option<vector<u8>>
+
+ + + +
+Implementation + + +
public fun upsert_oidc_provider(fx: &signer, name: vector<u8>, config_url: vector<u8>): Option<vector<u8>> acquires SupportedOIDCProviders {
+    system_addresses::assert_aptos_framework(fx);
+    chain_status::assert_genesis();
+
+    let provider_set = borrow_global_mut<SupportedOIDCProviders>(@aptos_framework);
+
+    let old_config_url = remove_oidc_provider_internal(provider_set, name);
+    vector::push_back(&mut provider_set.providers, OIDCProvider { name, config_url });
+    old_config_url
+}
+
+ + + +
+ + + +## Function `upsert_oidc_provider_for_next_epoch` + +Used in on-chain governance to update the supported OIDC providers, effective starting next epoch. +Example usage: +``` +aptos_framework::jwks::upsert_oidc_provider_for_next_epoch( +&framework_signer, +b"https://accounts.google.com", +b"https://accounts.google.com/.well-known/openid-configuration" +); +aptos_framework::aptos_governance::reconfigure(&framework_signer); +``` + + +
public fun upsert_oidc_provider_for_next_epoch(fx: &signer, name: vector<u8>, config_url: vector<u8>): option::Option<vector<u8>>
+
+ + + +
+Implementation + + +
public fun upsert_oidc_provider_for_next_epoch(fx: &signer, name: vector<u8>, config_url: vector<u8>): Option<vector<u8>> acquires SupportedOIDCProviders {
+    system_addresses::assert_aptos_framework(fx);
+
+    let provider_set = if (config_buffer::does_exist<SupportedOIDCProviders>()) {
+        config_buffer::extract<SupportedOIDCProviders>()
+    } else {
+        *borrow_global<SupportedOIDCProviders>(@aptos_framework)
+    };
+
+    let old_config_url = remove_oidc_provider_internal(&mut provider_set, name);
+    vector::push_back(&mut provider_set.providers, OIDCProvider { name, config_url });
+    config_buffer::upsert(provider_set);
+    old_config_url
+}
+
+ + + +
+ + + +## Function `remove_oidc_provider` + +Deprecated by remove_oidc_provider_for_next_epoch(). + +TODO: update all the tests that reference this function, then disable this function. + + +
public fun remove_oidc_provider(fx: &signer, name: vector<u8>): option::Option<vector<u8>>
+
+ + + +
+Implementation + + +
public fun remove_oidc_provider(fx: &signer, name: vector<u8>): Option<vector<u8>> acquires SupportedOIDCProviders {
+    system_addresses::assert_aptos_framework(fx);
+    chain_status::assert_genesis();
+
+    let provider_set = borrow_global_mut<SupportedOIDCProviders>(@aptos_framework);
+    remove_oidc_provider_internal(provider_set, name)
+}
+
+ + + +
+ + + +## Function `remove_oidc_provider_for_next_epoch` + +Used in on-chain governance to update the supported OIDC providers, effective starting next epoch. +Example usage: +``` +aptos_framework::jwks::remove_oidc_provider_for_next_epoch( +&framework_signer, +b"https://accounts.google.com", +); +aptos_framework::aptos_governance::reconfigure(&framework_signer); +``` + + +
public fun remove_oidc_provider_for_next_epoch(fx: &signer, name: vector<u8>): option::Option<vector<u8>>
+
+ + + +
+Implementation + + +
public fun remove_oidc_provider_for_next_epoch(fx: &signer, name: vector<u8>): Option<vector<u8>> acquires SupportedOIDCProviders {
+    system_addresses::assert_aptos_framework(fx);
+
+    let provider_set = if (config_buffer::does_exist<SupportedOIDCProviders>()) {
+        config_buffer::extract<SupportedOIDCProviders>()
+    } else {
+        *borrow_global<SupportedOIDCProviders>(@aptos_framework)
+    };
+    let ret = remove_oidc_provider_internal(&mut provider_set, name);
+    config_buffer::upsert(provider_set);
+    ret
+}
+
+ + + +
+ + + +## Function `on_new_epoch` + +Only used in reconfigurations to apply the pending SupportedOIDCProviders, if there is any. + + +
public(friend) fun on_new_epoch(framework: &signer)
+
+ + + +
+Implementation + + +
public(friend) fun on_new_epoch(framework: &signer) acquires SupportedOIDCProviders {
+    system_addresses::assert_aptos_framework(framework);
+    if (config_buffer::does_exist<SupportedOIDCProviders>()) {
+        let new_config = config_buffer::extract<SupportedOIDCProviders>();
+        if (exists<SupportedOIDCProviders>(@aptos_framework)) {
+            *borrow_global_mut<SupportedOIDCProviders>(@aptos_framework) = new_config;
+        } else {
+            move_to(framework, new_config);
+        }
+    }
+}
+
+ + + +
+ + + +## Function `set_patches` + +Set the Patches. Only called in governance proposals. + + +
public fun set_patches(fx: &signer, patches: vector<jwks::Patch>)
+
+ + + +
+Implementation + + +
public fun set_patches(fx: &signer, patches: vector<Patch>) acquires Patches, PatchedJWKs, ObservedJWKs {
+    system_addresses::assert_aptos_framework(fx);
+    borrow_global_mut<Patches>(@aptos_framework).patches = patches;
+    regenerate_patched_jwks();
+}
+
+ + + +
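+
+As a hypothetical sketch of a governance proposal body (the issuer, key IDs, and key material are placeholders; framework_signer is the @aptos_framework signer obtained inside the proposal), a compromised key could be force-removed and a replacement pinned like this:
+
+```move
+use std::string::utf8;
+// Build the patch list with the constructors below, then install it.
+let patches = vector[
+    aptos_framework::jwks::new_patch_remove_jwk(b"https://accounts.google.com", b"<compromised kid>"),
+    aptos_framework::jwks::new_patch_upsert_jwk(
+        b"https://accounts.google.com",
+        aptos_framework::jwks::new_rsa_jwk(
+            utf8(b"<replacement kid>"), utf8(b"RS256"), utf8(b"AQAB"), utf8(b"<replacement modulus>")
+        )
+    )
+];
+aptos_framework::jwks::set_patches(&framework_signer, patches);
+```
+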
+ + + +## Function `new_patch_remove_all` + +Create a Patch that removes all entries. + + +
public fun new_patch_remove_all(): jwks::Patch
+
+ + + +
+Implementation + + +
public fun new_patch_remove_all(): Patch {
+    Patch {
+        variant: copyable_any::pack(PatchRemoveAll {}),
+    }
+}
+
+ + + +
+ + + +## Function `new_patch_remove_issuer` + +Create a Patch that removes the entry of a given issuer, if it exists. + + +
public fun new_patch_remove_issuer(issuer: vector<u8>): jwks::Patch
+
+ + + +
+Implementation + + +
public fun new_patch_remove_issuer(issuer: vector<u8>): Patch {
+    Patch {
+        variant: copyable_any::pack(PatchRemoveIssuer { issuer }),
+    }
+}
+
+ + + +
+ + + +## Function `new_patch_remove_jwk` + +Create a Patch that removes a specific JWK (identified by its key ID) of a given issuer, if it exists. + + +
public fun new_patch_remove_jwk(issuer: vector<u8>, jwk_id: vector<u8>): jwks::Patch
+
+ + + +
+Implementation + + +
public fun new_patch_remove_jwk(issuer: vector<u8>, jwk_id: vector<u8>): Patch {
+    Patch {
+        variant: copyable_any::pack(PatchRemoveJWK { issuer, jwk_id })
+    }
+}
+
+ + + +
+ + + +## Function `new_patch_upsert_jwk` + +Create a Patch that upserts a JWK into an issuer's JWK set. + + +
public fun new_patch_upsert_jwk(issuer: vector<u8>, jwk: jwks::JWK): jwks::Patch
+
+ + + +
+Implementation + + +
public fun new_patch_upsert_jwk(issuer: vector<u8>, jwk: JWK): Patch {
+    Patch {
+        variant: copyable_any::pack(PatchUpsertJWK { issuer, jwk })
+    }
+}
+
+ + + +
+ + + +## Function `new_rsa_jwk` + +Create a JWK of variant RSA_JWK. + + +
public fun new_rsa_jwk(kid: string::String, alg: string::String, e: string::String, n: string::String): jwks::JWK
+
+ + + +
+Implementation + + +
public fun new_rsa_jwk(kid: String, alg: String, e: String, n: String): JWK {
+    JWK {
+        variant: copyable_any::pack(RSA_JWK {
+            kid,
+            kty: utf8(b"RSA"),
+            e,
+            n,
+            alg,
+        }),
+    }
+}
+
+ + + +
+ + + +## Function `new_unsupported_jwk` + +Create a JWK of variant UnsupportedJWK. + + +
public fun new_unsupported_jwk(id: vector<u8>, payload: vector<u8>): jwks::JWK
+
+ + + +
+Implementation + + +
public fun new_unsupported_jwk(id: vector<u8>, payload: vector<u8>): JWK {
+    JWK {
+        variant: copyable_any::pack(UnsupportedJWK { id, payload })
+    }
+}
+
+ + + +
+ + + +## Function `initialize` + +Initialize some JWK resources. Should only be invoked by genesis. + + +
public fun initialize(fx: &signer)
+
+ + + +
+Implementation + + +
public fun initialize(fx: &signer) {
+    system_addresses::assert_aptos_framework(fx);
+    move_to(fx, SupportedOIDCProviders { providers: vector[] });
+    move_to(fx, ObservedJWKs { jwks: AllProvidersJWKs { entries: vector[] } });
+    move_to(fx, Patches { patches: vector[] });
+    move_to(fx, PatchedJWKs { jwks: AllProvidersJWKs { entries: vector[] } });
+}
+
+ + + +
+ + + +## Function `remove_oidc_provider_internal` + +Helper function that removes an OIDC provider from the SupportedOIDCProviders. +Returns the old config URL of the provider, if any, as an Option. + + +
fun remove_oidc_provider_internal(provider_set: &mut jwks::SupportedOIDCProviders, name: vector<u8>): option::Option<vector<u8>>
+
+ + + +
+Implementation + + +
fun remove_oidc_provider_internal(provider_set: &mut SupportedOIDCProviders, name: vector<u8>): Option<vector<u8>> {
+    let (name_exists, idx) = vector::find(&provider_set.providers, |obj| {
+        let provider: &OIDCProvider = obj;
+        provider.name == name
+    });
+
+    if (name_exists) {
+        let old_provider = vector::swap_remove(&mut provider_set.providers, idx);
+        option::some(old_provider.config_url)
+    } else {
+        option::none()
+    }
+}
+
+ + + +
+ + + +## Function `upsert_into_observed_jwks` + +Only used by validators to publish their observed JWK update. + +NOTE: It is assumed verification has been done to ensure each update is quorum-certified, +and its version equals the on-chain version + 1. + +
public fun upsert_into_observed_jwks(fx: &signer, provider_jwks_vec: vector<jwks::ProviderJWKs>)
+
+ + + +
+Implementation + + +
public fun upsert_into_observed_jwks(fx: &signer, provider_jwks_vec: vector<ProviderJWKs>) acquires ObservedJWKs, PatchedJWKs, Patches {
+    system_addresses::assert_aptos_framework(fx);
+    let observed_jwks = borrow_global_mut<ObservedJWKs>(@aptos_framework);
+    vector::for_each(provider_jwks_vec, |obj| {
+        let provider_jwks: ProviderJWKs = obj;
+        upsert_provider_jwks(&mut observed_jwks.jwks, provider_jwks);
+    });
+
+    let epoch = reconfiguration::current_epoch();
+    emit(ObservedJWKsUpdated { epoch, jwks: observed_jwks.jwks });
+    regenerate_patched_jwks();
+}
+
+ + + +
+ + + +## Function `remove_issuer_from_observed_jwks` + +Only used by governance to delete an issuer from ObservedJWKs, if it exists. + +Return the potentially existing ProviderJWKs of the given issuer. + + +
public fun remove_issuer_from_observed_jwks(fx: &signer, issuer: vector<u8>): option::Option<jwks::ProviderJWKs>
+
+ + + +
+Implementation + + +
public fun remove_issuer_from_observed_jwks(fx: &signer, issuer: vector<u8>): Option<ProviderJWKs> acquires ObservedJWKs, PatchedJWKs, Patches {
+    system_addresses::assert_aptos_framework(fx);
+    let observed_jwks = borrow_global_mut<ObservedJWKs>(@aptos_framework);
+    let old_value = remove_issuer(&mut observed_jwks.jwks, issuer);
+
+    let epoch = reconfiguration::current_epoch();
+    emit(ObservedJWKsUpdated { epoch, jwks: observed_jwks.jwks });
+    regenerate_patched_jwks();
+
+    old_value
+}
+
+ + + +
+ + + +## Function `regenerate_patched_jwks` + +Regenerate PatchedJWKs from ObservedJWKs and Patches and save the result. + + +
fun regenerate_patched_jwks()
+
+ + + +
+Implementation + + +
fun regenerate_patched_jwks() acquires PatchedJWKs, Patches, ObservedJWKs {
+    let jwks = borrow_global<ObservedJWKs>(@aptos_framework).jwks;
+    let patches = borrow_global<Patches>(@aptos_framework);
+    vector::for_each_ref(&patches.patches, |obj|{
+        let patch: &Patch = obj;
+        apply_patch(&mut jwks, *patch);
+    });
+    *borrow_global_mut<PatchedJWKs>(@aptos_framework) = PatchedJWKs { jwks };
+}
+
+ + + +
+ + + +## Function `try_get_jwk_by_issuer` + +Get a JWK by issuer and key ID from an AllProvidersJWKs, if it exists. + + +
fun try_get_jwk_by_issuer(jwks: &jwks::AllProvidersJWKs, issuer: vector<u8>, jwk_id: vector<u8>): option::Option<jwks::JWK>
+
+ + + +
+Implementation + + +
fun try_get_jwk_by_issuer(jwks: &AllProvidersJWKs, issuer: vector<u8>, jwk_id: vector<u8>): Option<JWK> {
+    let (issuer_found, index) = vector::find(&jwks.entries, |obj| {
+        let provider_jwks: &ProviderJWKs = obj;
+        issuer == provider_jwks.issuer
+    });
+
+    if (issuer_found) {
+        try_get_jwk_by_id(vector::borrow(&jwks.entries, index), jwk_id)
+    } else {
+        option::none()
+    }
+}
+
+ + + +
+ + + +## Function `try_get_jwk_by_id` + +Get a JWK by key ID from a ProviderJWKs, if it exists. + + +
fun try_get_jwk_by_id(provider_jwks: &jwks::ProviderJWKs, jwk_id: vector<u8>): option::Option<jwks::JWK>
+
+ + + +
+Implementation + + +
fun try_get_jwk_by_id(provider_jwks: &ProviderJWKs, jwk_id: vector<u8>): Option<JWK> {
+    let (jwk_id_found, index) = vector::find(&provider_jwks.jwks, |obj|{
+        let jwk: &JWK = obj;
+        jwk_id == get_jwk_id(jwk)
+    });
+
+    if (jwk_id_found) {
+        option::some(*vector::borrow(&provider_jwks.jwks, index))
+    } else {
+        option::none()
+    }
+}
+
+ + + +
+ + + +## Function `get_jwk_id` + +Get the ID of a JWK. + + +
fun get_jwk_id(jwk: &jwks::JWK): vector<u8>
+
+ + + +
+Implementation + + +
fun get_jwk_id(jwk: &JWK): vector<u8> {
+    let variant_type_name = *string::bytes(copyable_any::type_name(&jwk.variant));
+    if (variant_type_name == b"0x1::jwks::RSA_JWK") {
+        let rsa = copyable_any::unpack<RSA_JWK>(jwk.variant);
+        *string::bytes(&rsa.kid)
+    } else if (variant_type_name == b"0x1::jwks::UnsupportedJWK") {
+        let unsupported = copyable_any::unpack<UnsupportedJWK>(jwk.variant);
+        unsupported.id
+    } else {
+        abort(error::invalid_argument(EUNKNOWN_JWK_VARIANT))
+    }
+}
+
+ + + +
+ + + +## Function `upsert_provider_jwks` + +Upsert a ProviderJWKs into an AllProvidersJWKs. If this upsert replaced an existing entry, return it. +Maintains the sorted-by-issuer invariant in AllProvidersJWKs. + + +
fun upsert_provider_jwks(jwks: &mut jwks::AllProvidersJWKs, provider_jwks: jwks::ProviderJWKs): option::Option<jwks::ProviderJWKs>
+
+ + + +
+Implementation + + +
fun upsert_provider_jwks(jwks: &mut AllProvidersJWKs, provider_jwks: ProviderJWKs): Option<ProviderJWKs> {
+    // NOTE: Using a linear-time search here because we do not expect too many providers.
+    let found = false;
+    let index = 0;
+    let num_entries = vector::length(&jwks.entries);
+    while (index < num_entries) {
+        let cur_entry = vector::borrow(&jwks.entries, index);
+        let comparison = compare_u8_vector(provider_jwks.issuer, cur_entry.issuer);
+        if (is_greater_than(&comparison)) {
+            index = index + 1;
+        } else {
+            found = is_equal(&comparison);
+            break
+        }
+    };
+
+    // Now if `found == true`, `index` points to the JWK we want to update/remove; otherwise, `index` points to
+    // where we want to insert.
+    let ret = if (found) {
+        let entry = vector::borrow_mut(&mut jwks.entries, index);
+        let old_entry = option::some(*entry);
+        *entry = provider_jwks;
+        old_entry
+    } else {
+        vector::insert(&mut jwks.entries, index, provider_jwks);
+        option::none()
+    };
+
+    ret
+}
+
+ + + +
+ + + +## Function `remove_issuer` + +Remove the entry of an issuer from an AllProvidersJWKs and return the entry, if it exists. +Maintains the sorted-by-issuer invariant in AllProvidersJWKs. + + +
fun remove_issuer(jwks: &mut jwks::AllProvidersJWKs, issuer: vector<u8>): option::Option<jwks::ProviderJWKs>
+
+ + + +
+Implementation + + +
fun remove_issuer(jwks: &mut AllProvidersJWKs, issuer: vector<u8>): Option<ProviderJWKs> {
+    let (found, index) = vector::find(&jwks.entries, |obj| {
+        let provider_jwk_set: &ProviderJWKs = obj;
+        provider_jwk_set.issuer == issuer
+    });
+
+    let ret = if (found) {
+        option::some(vector::remove(&mut jwks.entries, index))
+    } else {
+        option::none()
+    };
+
+    ret
+}
+
+ + + +
+ + + +## Function `upsert_jwk` + +Upsert a JWK into a ProviderJWKs. If this upsert replaced an existing entry, return it. + + +
fun upsert_jwk(set: &mut jwks::ProviderJWKs, jwk: jwks::JWK): option::Option<jwks::JWK>
+
+ + + +
+Implementation + + +
fun upsert_jwk(set: &mut ProviderJWKs, jwk: JWK): Option<JWK> {
+    let found = false;
+    let index = 0;
+    let num_entries = vector::length(&set.jwks);
+    while (index < num_entries) {
+        let cur_entry = vector::borrow(&set.jwks, index);
+        let comparison = compare_u8_vector(get_jwk_id(&jwk), get_jwk_id(cur_entry));
+        if (is_greater_than(&comparison)) {
+            index = index + 1;
+        } else {
+            found = is_equal(&comparison);
+            break
+        }
+    };
+
+    // Now if `found == true`, `index` points to the JWK we want to update/remove; otherwise, `index` points to
+    // where we want to insert.
+    let ret = if (found) {
+        let entry = vector::borrow_mut(&mut set.jwks, index);
+        let old_entry = option::some(*entry);
+        *entry = jwk;
+        old_entry
+    } else {
+        vector::insert(&mut set.jwks, index, jwk);
+        option::none()
+    };
+
+    ret
+}
+
+ + + +
+ + + +## Function `remove_jwk` + +Remove the entry of a key ID from a ProviderJWKs and return the entry, if it exists. + + +
fun remove_jwk(jwks: &mut jwks::ProviderJWKs, jwk_id: vector<u8>): option::Option<jwks::JWK>
+
+ + + +
+Implementation + + +
fun remove_jwk(jwks: &mut ProviderJWKs, jwk_id: vector<u8>): Option<JWK> {
+    let (found, index) = vector::find(&jwks.jwks, |obj| {
+        let jwk: &JWK = obj;
+        jwk_id == get_jwk_id(jwk)
+    });
+
+    let ret = if (found) {
+        option::some(vector::remove(&mut jwks.jwks, index))
+    } else {
+        option::none()
+    };
+
+    ret
+}
+
+ + + +
+ + + +## Function `apply_patch` + +Modify an AllProvidersJWKs object with a Patch. +Maintains the sorted-by-issuer invariant in AllProvidersJWKs. + + +
fun apply_patch(jwks: &mut jwks::AllProvidersJWKs, patch: jwks::Patch)
+
+ + + +
+Implementation + + +
fun apply_patch(jwks: &mut AllProvidersJWKs, patch: Patch) {
+    let variant_type_name = *string::bytes(copyable_any::type_name(&patch.variant));
+    if (variant_type_name == b"0x1::jwks::PatchRemoveAll") {
+        jwks.entries = vector[];
+    } else if (variant_type_name == b"0x1::jwks::PatchRemoveIssuer") {
+        let cmd = copyable_any::unpack<PatchRemoveIssuer>(patch.variant);
+        remove_issuer(jwks, cmd.issuer);
+    } else if (variant_type_name == b"0x1::jwks::PatchRemoveJWK") {
+        let cmd = copyable_any::unpack<PatchRemoveJWK>(patch.variant);
+        // TODO: This is inefficient: we remove the issuer, modify its JWKs and reinsert the updated issuer. Why
+        // not just update it in place?
+        let existing_jwk_set = remove_issuer(jwks, cmd.issuer);
+        if (option::is_some(&existing_jwk_set)) {
+            let jwk_set = option::extract(&mut existing_jwk_set);
+            remove_jwk(&mut jwk_set, cmd.jwk_id);
+            upsert_provider_jwks(jwks, jwk_set);
+        };
+    } else if (variant_type_name == b"0x1::jwks::PatchUpsertJWK") {
+        let cmd = copyable_any::unpack<PatchUpsertJWK>(patch.variant);
+        // TODO: This is inefficient: we remove the issuer, modify its JWKs and reinsert the updated issuer. Why
+        // not just update it in place?
+        let existing_jwk_set = remove_issuer(jwks, cmd.issuer);
+        let jwk_set = if (option::is_some(&existing_jwk_set)) {
+            option::extract(&mut existing_jwk_set)
+        } else {
+            ProviderJWKs {
+                version: 0,
+                issuer: cmd.issuer,
+                jwks: vector[],
+            }
+        };
+        upsert_jwk(&mut jwk_set, cmd.jwk);
+        upsert_provider_jwks(jwks, jwk_set);
+    } else {
+        abort(std::error::invalid_argument(EUNKNOWN_PATCH_VARIANT))
+    }
+}
+
+ + + +
+ + + +## Specification + + + + +### Function `on_new_epoch` + + +
public(friend) fun on_new_epoch(framework: &signer)
+
+ + + + +
requires @aptos_framework == std::signer::address_of(framework);
+include config_buffer::OnNewEpochRequirement<SupportedOIDCProviders>;
+aborts_if false;
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/aptos-framework/tests/compiler-v2-doc/keyless_account.md b/aptos-move/framework/aptos-framework/tests/compiler-v2-doc/keyless_account.md new file mode 100644 index 0000000000000..2b69d8329f0b6 --- /dev/null +++ b/aptos-move/framework/aptos-framework/tests/compiler-v2-doc/keyless_account.md @@ -0,0 +1,800 @@ + + + +# Module `0x1::keyless_account` + +This module is responsible for configuring keyless blockchain accounts which were introduced in +[AIP-61](https://github.com/aptos-foundation/AIPs/blob/main/aips/aip-61.md). + + +- [Struct `Group`](#0x1_keyless_account_Group) +- [Resource `Groth16VerificationKey`](#0x1_keyless_account_Groth16VerificationKey) +- [Resource `Configuration`](#0x1_keyless_account_Configuration) +- [Constants](#@Constants_0) +- [Function `new_groth16_verification_key`](#0x1_keyless_account_new_groth16_verification_key) +- [Function `new_configuration`](#0x1_keyless_account_new_configuration) +- [Function `validate_groth16_vk`](#0x1_keyless_account_validate_groth16_vk) +- [Function `update_groth16_verification_key`](#0x1_keyless_account_update_groth16_verification_key) +- [Function `update_configuration`](#0x1_keyless_account_update_configuration) +- [Function `update_training_wheels`](#0x1_keyless_account_update_training_wheels) +- [Function `update_max_exp_horizon`](#0x1_keyless_account_update_max_exp_horizon) +- [Function `remove_all_override_auds`](#0x1_keyless_account_remove_all_override_auds) +- [Function `add_override_aud`](#0x1_keyless_account_add_override_aud) +- [Function `set_groth16_verification_key_for_next_epoch`](#0x1_keyless_account_set_groth16_verification_key_for_next_epoch) +- [Function `set_configuration_for_next_epoch`](#0x1_keyless_account_set_configuration_for_next_epoch) +- [Function `update_training_wheels_for_next_epoch`](#0x1_keyless_account_update_training_wheels_for_next_epoch) +- [Function `update_max_exp_horizon_for_next_epoch`](#0x1_keyless_account_update_max_exp_horizon_for_next_epoch) +- [Function `remove_all_override_auds_for_next_epoch`](#0x1_keyless_account_remove_all_override_auds_for_next_epoch) +- [Function `add_override_aud_for_next_epoch`](#0x1_keyless_account_add_override_aud_for_next_epoch) +- [Function `on_new_epoch`](#0x1_keyless_account_on_new_epoch) +- [Specification](#@Specification_1) + + +
use 0x1::bn254_algebra;
+use 0x1::chain_status;
+use 0x1::config_buffer;
+use 0x1::crypto_algebra;
+use 0x1::ed25519;
+use 0x1::option;
+use 0x1::signer;
+use 0x1::string;
+use 0x1::system_addresses;
+
+ + + + + +## Struct `Group` + + + +
#[resource_group(#[scope = global])]
+struct Group
+
+ + + +
+Fields + + +
+
+dummy_field: bool +
+
+ +
+
+ + +
+ + + +## Resource `Groth16VerificationKey` + +The 288-byte Groth16 verification key (VK) for the ZK relation that implements keyless accounts + + +
#[resource_group_member(#[group = 0x1::keyless_account::Group])]
+struct Groth16VerificationKey has drop, store, key
+
+ + + +
+Fields + + +
+
+alpha_g1: vector<u8> +
+
+ 32-byte serialization of alpha * G, where G is the generator of G1. +
+
+beta_g2: vector<u8> +
+
+ 64-byte serialization of alpha * H, where H is the generator of G2. +
+
+gamma_g2: vector<u8> +
+
+ 64-byte serialization of gamma * H, where H is the generator of G2. +
+
+delta_g2: vector<u8> +
+
+ 64-byte serialization of delta * H, where H is the generator of G2. +
+
+gamma_abc_g1: vector<vector<u8>> +
+
+ \forall i \in {0, ..., \ell}, 64-byte serialization of gamma^{-1} * (beta * a_i + alpha * b_i + c_i) * H, where + H is the generator of G1 and \ell is 1 for the ZK relation. +
+
+ + +
+ + + +## Resource `Configuration` + + + +
#[resource_group_member(#[group = 0x1::keyless_account::Group])]
+struct Configuration has copy, drop, store, key
+
+ + + +
+Fields + + +
+
+override_aud_vals: vector<string::String> +
+
+ An override aud for the identity of a recovery service, which will help users recover their keyless accounts + associated with dapps or wallets that have disappeared. + IMPORTANT: This recovery service **cannot** on its own take over user accounts; a user must first sign in + via OAuth in the recovery service in order to allow it to rotate any of that user's keyless accounts. +
+
+max_signatures_per_txn: u16 +
+
+ No transaction can have more than this many keyless signatures. +
+
+max_exp_horizon_secs: u64 +
+
+ How far in the future from the JWT's issued-at time the EPK expiry can be set. +
+
+training_wheels_pubkey: option::Option<vector<u8>> +
+
+ The training wheels PK, if training wheels are on +
+
+max_commited_epk_bytes: u16 +
+
+ The max length of an ephemeral public key supported in our circuit (93 bytes) +
+
+max_iss_val_bytes: u16 +
+
+ The max length of the value of the JWT's iss field supported in our circuit (e.g., "https://accounts.google.com") +
+
+max_extra_field_bytes: u16 +
+
+ The max length of the JWT field name and value (e.g., "max_age":"18") supported in our circuit +
+
+max_jwt_header_b64_bytes: u32 +
+
+ The max length of the base64url-encoded JWT header in bytes supported in our circuit +
+
+ + +
+ + + +## Constants + + + + +A serialized BN254 G1 point is invalid. + + +
const E_INVALID_BN254_G1_SERIALIZATION: u64 = 2;
+
+ + + + + +A serialized BN254 G2 point is invalid. + + +
const E_INVALID_BN254_G2_SERIALIZATION: u64 = 3;
+
+ + + + + +The training wheels PK needs to be 32 bytes long. + + +
const E_TRAINING_WHEELS_PK_WRONG_SIZE: u64 = 1;
+
+ + + + + +## Function `new_groth16_verification_key` + + + +
public fun new_groth16_verification_key(alpha_g1: vector<u8>, beta_g2: vector<u8>, gamma_g2: vector<u8>, delta_g2: vector<u8>, gamma_abc_g1: vector<vector<u8>>): keyless_account::Groth16VerificationKey
+
+ + + +
+Implementation + + +
public fun new_groth16_verification_key(alpha_g1: vector<u8>,
+                                        beta_g2: vector<u8>,
+                                        gamma_g2: vector<u8>,
+                                        delta_g2: vector<u8>,
+                                        gamma_abc_g1: vector<vector<u8>>
+): Groth16VerificationKey {
+    Groth16VerificationKey {
+        alpha_g1,
+        beta_g2,
+        gamma_g2,
+        delta_g2,
+        gamma_abc_g1,
+    }
+}
+
+ + + +
+ + + +## Function `new_configuration` + + + +
public fun new_configuration(override_aud_val: vector<string::String>, max_signatures_per_txn: u16, max_exp_horizon_secs: u64, training_wheels_pubkey: option::Option<vector<u8>>, max_commited_epk_bytes: u16, max_iss_val_bytes: u16, max_extra_field_bytes: u16, max_jwt_header_b64_bytes: u32): keyless_account::Configuration
+
+ + + +
+Implementation + + +
public fun new_configuration(
+    override_aud_val: vector<String>,
+    max_signatures_per_txn: u16,
+    max_exp_horizon_secs: u64,
+    training_wheels_pubkey: Option<vector<u8>>,
+    max_commited_epk_bytes: u16,
+    max_iss_val_bytes: u16,
+    max_extra_field_bytes: u16,
+    max_jwt_header_b64_bytes: u32
+): Configuration {
+    Configuration {
+        override_aud_vals: override_aud_val,
+        max_signatures_per_txn,
+        max_exp_horizon_secs,
+        training_wheels_pubkey,
+        max_commited_epk_bytes,
+        max_iss_val_bytes,
+        max_extra_field_bytes,
+        max_jwt_header_b64_bytes,
+    }
+}
+
+ + + +
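+
+A rough sketch of building a Configuration (every numeric value below is a placeholder chosen for illustration, not a value used on any network):
+
+```move
+use std::option;
+// Placeholder limits; the real on-chain values are set via governance.
+let config = aptos_framework::keyless_account::new_configuration(
+    vector[],          // override_aud_val: no recovery-service override aud
+    3,                 // max_signatures_per_txn
+    10000000,          // max_exp_horizon_secs
+    option::none(),    // training_wheels_pubkey: training wheels off
+    93,                // max_commited_epk_bytes
+    120,               // max_iss_val_bytes
+    350,               // max_extra_field_bytes
+    300                // max_jwt_header_b64_bytes
+);
+aptos_framework::keyless_account::set_configuration_for_next_epoch(&framework_signer, config);
+```
+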
+ + + +## Function `validate_groth16_vk` + +Pre-validate the VK to actively prevent incorrect VKs from being set on-chain. + +
fun validate_groth16_vk(vk: &keyless_account::Groth16VerificationKey)
+
+ + + +
+Implementation + + +
fun validate_groth16_vk(vk: &Groth16VerificationKey) {
+    // Could be leveraged to speed up the VM deserialization of the VK by 2x, since it can assume the points are valid.
+    assert!(option::is_some(&crypto_algebra::deserialize<bn254_algebra::G1, bn254_algebra::FormatG1Compr>(&vk.alpha_g1)), E_INVALID_BN254_G1_SERIALIZATION);
+    assert!(option::is_some(&crypto_algebra::deserialize<bn254_algebra::G2, bn254_algebra::FormatG2Compr>(&vk.beta_g2)), E_INVALID_BN254_G2_SERIALIZATION);
+    assert!(option::is_some(&crypto_algebra::deserialize<bn254_algebra::G2, bn254_algebra::FormatG2Compr>(&vk.gamma_g2)), E_INVALID_BN254_G2_SERIALIZATION);
+    assert!(option::is_some(&crypto_algebra::deserialize<bn254_algebra::G2, bn254_algebra::FormatG2Compr>(&vk.delta_g2)), E_INVALID_BN254_G2_SERIALIZATION);
+    for (i in 0..vector::length(&vk.gamma_abc_g1)) {
+        assert!(option::is_some(&crypto_algebra::deserialize<bn254_algebra::G1, bn254_algebra::FormatG1Compr>(vector::borrow(&vk.gamma_abc_g1, i))), E_INVALID_BN254_G1_SERIALIZATION);
+    };
+}
+
+ + + +
+ + + +## Function `update_groth16_verification_key` + +Sets the Groth16 verification key, only callable during genesis. To call during governance proposals, use +set_groth16_verification_key_for_next_epoch. + +WARNING: See set_groth16_verification_key_for_next_epoch for caveats. + + +
public fun update_groth16_verification_key(fx: &signer, vk: keyless_account::Groth16VerificationKey)
+
+ + + +
+Implementation + + +
public fun update_groth16_verification_key(fx: &signer, vk: Groth16VerificationKey) {
+    system_addresses::assert_aptos_framework(fx);
+    chain_status::assert_genesis();
+    // There should not be a previous resource set here.
+    move_to(fx, vk);
+}
+
+ + + +
+ + + +## Function `update_configuration` + +Sets the keyless configuration, only callable during genesis. To call during governance proposals, use +set_configuration_for_next_epoch. + +WARNING: See set_configuration_for_next_epoch for caveats. + + +
public fun update_configuration(fx: &signer, config: keyless_account::Configuration)
+
+ + + +
+Implementation + + +
public fun update_configuration(fx: &signer, config: Configuration) {
+    system_addresses::assert_aptos_framework(fx);
+    chain_status::assert_genesis();
+    // There should not be a previous resource set here.
+    move_to(fx, config);
+}
+
+ + + +
+ + + +## Function `update_training_wheels` + + + +
#[deprecated]
+public fun update_training_wheels(fx: &signer, pk: option::Option<vector<u8>>)
+
+ + + +
+Implementation + + +
public fun update_training_wheels(fx: &signer, pk: Option<vector<u8>>) acquires Configuration {
+    system_addresses::assert_aptos_framework(fx);
+    chain_status::assert_genesis();
+
+    if (option::is_some(&pk)) {
+        assert!(vector::length(option::borrow(&pk)) == 32, E_TRAINING_WHEELS_PK_WRONG_SIZE)
+    };
+
+    let config = borrow_global_mut<Configuration>(signer::address_of(fx));
+    config.training_wheels_pubkey = pk;
+}
+
+ + + +
+ + + +## Function `update_max_exp_horizon` + + + +
#[deprecated]
+public fun update_max_exp_horizon(fx: &signer, max_exp_horizon_secs: u64)
+
+ + + +
+Implementation + + +
public fun update_max_exp_horizon(fx: &signer, max_exp_horizon_secs: u64) acquires Configuration {
+    system_addresses::assert_aptos_framework(fx);
+    chain_status::assert_genesis();
+
+    let config = borrow_global_mut<Configuration>(signer::address_of(fx));
+    config.max_exp_horizon_secs = max_exp_horizon_secs;
+}
+
+ + + +
+ + + +## Function `remove_all_override_auds` + + + +
#[deprecated]
+public fun remove_all_override_auds(fx: &signer)
+
+ + + +
+Implementation + + +
public fun remove_all_override_auds(fx: &signer) acquires Configuration {
+    system_addresses::assert_aptos_framework(fx);
+    chain_status::assert_genesis();
+
+    let config = borrow_global_mut<Configuration>(signer::address_of(fx));
+    config.override_aud_vals = vector[];
+}
+
+ + + +
+ + + +## Function `add_override_aud` + + + +
#[deprecated]
+public fun add_override_aud(fx: &signer, aud: string::String)
+
+ + + +
+Implementation + + +
public fun add_override_aud(fx: &signer, aud: String) acquires Configuration {
+    system_addresses::assert_aptos_framework(fx);
+    chain_status::assert_genesis();
+
+    let config = borrow_global_mut<Configuration>(signer::address_of(fx));
+    vector::push_back(&mut config.override_aud_vals, aud);
+}
+
+ + + +
+ + + +## Function `set_groth16_verification_key_for_next_epoch` + +Queues up a change to the Groth16 verification key. The change will only be effective after reconfiguration. +Only callable via governance proposal. + +WARNING: To mitigate against DoS attacks, a VK change should be done together with a training wheels PK change, +so that old ZKPs for the old VK cannot be replayed as potentially-valid ZKPs. + +WARNING: If a malicious key is set, this would lead to stolen funds. + + +
public fun set_groth16_verification_key_for_next_epoch(fx: &signer, vk: keyless_account::Groth16VerificationKey)
+
+ + + +
+Implementation + + +
public fun set_groth16_verification_key_for_next_epoch(fx: &signer, vk: Groth16VerificationKey) {
+    system_addresses::assert_aptos_framework(fx);
+    config_buffer::upsert<Groth16VerificationKey>(vk);
+}
+
+ + + +
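+
+A hypothetical governance flow (framework_signer and every *_bytes value are placeholders supplied by the proposal author) that rotates the VK and then triggers the epoch change:
+
+```move
+use std::option;
+// The serialized curve points below are placeholders.
+let vk = aptos_framework::keyless_account::new_groth16_verification_key(
+    alpha_g1_bytes, beta_g2_bytes, gamma_g2_bytes, delta_g2_bytes,
+    vector[gamma_abc_g1_0_bytes, gamma_abc_g1_1_bytes]
+);
+aptos_framework::keyless_account::set_groth16_verification_key_for_next_epoch(&framework_signer, vk);
+// Per the warning above, rotate the training wheels PK in the same proposal.
+aptos_framework::keyless_account::update_training_wheels_for_next_epoch(&framework_signer, option::some(new_training_wheels_pk));
+aptos_framework::aptos_governance::reconfigure(&framework_signer);
+```
+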
+ + + +## Function `set_configuration_for_next_epoch` + +Queues up a change to the keyless configuration. The change will only be effective after reconfiguration. Only +callable via governance proposal. + +WARNING: A malicious Configuration could lead to DoS attacks, create liveness issues, or enable a malicious +recovery service provider to phish users' accounts. + + +
public fun set_configuration_for_next_epoch(fx: &signer, config: keyless_account::Configuration)
+
+ + + +
+Implementation + + +
public fun set_configuration_for_next_epoch(fx: &signer, config: Configuration) {
+    system_addresses::assert_aptos_framework(fx);
+    config_buffer::upsert<Configuration>(config);
+}
+
+ + + +
+ + + +## Function `update_training_wheels_for_next_epoch` + +Convenience method to queue up a change to the training wheels PK. The change will only be effective after +reconfiguration. Only callable via governance proposal. + +WARNING: If a malicious key is set, this *could* lead to stolen funds. + + +
public fun update_training_wheels_for_next_epoch(fx: &signer, pk: option::Option<vector<u8>>)
+
+ + + +
+Implementation + + +
public fun update_training_wheels_for_next_epoch(fx: &signer, pk: Option<vector<u8>>) acquires Configuration {
+    system_addresses::assert_aptos_framework(fx);
+
+    // If a PK is being set, validate it first.
+    if (option::is_some(&pk)) {
+        let bytes = *option::borrow(&pk);
+        let vpk = ed25519::new_validated_public_key_from_bytes(bytes);
+        assert!(option::is_some(&vpk), E_TRAINING_WHEELS_PK_WRONG_SIZE)
+    };
+
+    let config = if (config_buffer::does_exist<Configuration>()) {
+        config_buffer::extract<Configuration>()
+    } else {
+        *borrow_global<Configuration>(signer::address_of(fx))
+    };
+
+    config.training_wheels_pubkey = pk;
+
+    set_configuration_for_next_epoch(fx, config);
+}
+
+ + + +
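+
+For example, a hypothetical proposal body that turns training wheels off starting next epoch:
+
+```move
+use std::option;
+aptos_framework::keyless_account::update_training_wheels_for_next_epoch(&framework_signer, option::none());
+aptos_framework::aptos_governance::reconfigure(&framework_signer);
+```
+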
+ + + +## Function `update_max_exp_horizon_for_next_epoch` + +Convenience method to queue up a change to the max expiration horizon. The change will only be effective after +reconfiguration. Only callable via governance proposal. + +
public fun update_max_exp_horizon_for_next_epoch(fx: &signer, max_exp_horizon_secs: u64)
+
+ + + +
+Implementation + + +
public fun update_max_exp_horizon_for_next_epoch(fx: &signer, max_exp_horizon_secs: u64) acquires Configuration {
+    system_addresses::assert_aptos_framework(fx);
+
+    let config = if (config_buffer::does_exist<Configuration>()) {
+        config_buffer::extract<Configuration>()
+    } else {
+        *borrow_global<Configuration>(signer::address_of(fx))
+    };
+
+    config.max_exp_horizon_secs = max_exp_horizon_secs;
+
+    set_configuration_for_next_epoch(fx, config);
+}
+
+ + + +
+ + + +## Function `remove_all_override_auds_for_next_epoch` + +Convenience method to queue up clearing the set of override aud's. The change will only be effective after +reconfiguration. Only callable via governance proposal. + +WARNING: When no override aud is set, recovery of keyless accounts associated with applications that disappeared +is no longer possible. + + +
public fun remove_all_override_auds_for_next_epoch(fx: &signer)
+
+ + + +
+Implementation + + +
public fun remove_all_override_auds_for_next_epoch(fx: &signer) acquires Configuration {
+    system_addresses::assert_aptos_framework(fx);
+
+    let config = if (config_buffer::does_exist<Configuration>()) {
+        config_buffer::extract<Configuration>()
+    } else {
+        *borrow_global<Configuration>(signer::address_of(fx))
+    };
+
+    config.override_aud_vals = vector[];
+
+    set_configuration_for_next_epoch(fx, config);
+}
+
+ + + +
+ + + +## Function `add_override_aud_for_next_epoch` + +Convenience method to queue up an append to the set of override aud's. The change will only be effective +after reconfiguration. Only callable via governance proposal. + +WARNING: If a malicious override aud is set, this *could* lead to stolen funds. + + +
public fun add_override_aud_for_next_epoch(fx: &signer, aud: string::String)
+
+ + + +
+Implementation + + +
public fun add_override_aud_for_next_epoch(fx: &signer, aud: String) acquires Configuration {
+    system_addresses::assert_aptos_framework(fx);
+
+    let config = if (config_buffer::does_exist<Configuration>()) {
+        config_buffer::extract<Configuration>()
+    } else {
+        *borrow_global<Configuration>(signer::address_of(fx))
+    };
+
+    vector::push_back(&mut config.override_aud_vals, aud);
+
+    set_configuration_for_next_epoch(fx, config);
+}
+
+ + + +
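+
+A sketch of a proposal that registers a recovery service's aud (the aud value below is a placeholder for the service's OAuth client ID):
+
+```move
+use std::string::utf8;
+aptos_framework::keyless_account::add_override_aud_for_next_epoch(&framework_signer, utf8(b"recovery-service-client-id.example"));
+aptos_framework::aptos_governance::reconfigure(&framework_signer);
+```
+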
+ + + +## Function `on_new_epoch` + +Only used in reconfigurations to apply the queued up configuration changes, if there are any. + + +
public(friend) fun on_new_epoch(fx: &signer)
+
+ + + +
+Implementation + + +
public(friend) fun on_new_epoch(fx: &signer) acquires Groth16VerificationKey, Configuration {
+    system_addresses::assert_aptos_framework(fx);
+
+    if (config_buffer::does_exist<Groth16VerificationKey>()) {
+        let vk = config_buffer::extract();
+        if (exists<Groth16VerificationKey>(@aptos_framework)) {
+            *borrow_global_mut<Groth16VerificationKey>(@aptos_framework) = vk;
+        } else {
+            move_to(fx, vk);
+        }
+    };
+
+    if (config_buffer::does_exist<Configuration>()) {
+        let config = config_buffer::extract();
+        if (exists<Configuration>(@aptos_framework)) {
+            *borrow_global_mut<Configuration>(@aptos_framework) = config;
+        } else {
+            move_to(fx, config);
+        }
+    };
+}
+
+ + + +
+ + + +## Specification + + + +
pragma verify=false;
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/aptos-framework/tests/deflation_token_tests.move b/aptos-move/framework/aptos-framework/tests/deflation_token_tests.move index 32f4f153a2cd4..6d7b6d32b3dae 100644 --- a/aptos-move/framework/aptos-framework/tests/deflation_token_tests.move +++ b/aptos-move/framework/aptos-framework/tests/deflation_token_tests.move @@ -152,7 +152,7 @@ module 0xcafe::deflation_token_tests { &creator_ref, option::some(withdraw), option::none(), - option::none(), + option::none() ); } @@ -218,7 +218,7 @@ module 0xcafe::deflation_token_tests { &creator_ref, option::some(withdraw), option::none(), - option::some(withdraw), + option::some(withdraw) ); } @@ -241,7 +241,7 @@ module 0xcafe::deflation_token_tests { &creator_ref, option::some(withdraw), option::none(), - option::none(), + option::none() ); } @@ -263,7 +263,7 @@ module 0xcafe::deflation_token_tests { &creator_ref, option::some(withdraw), option::none(), - option::none(), + option::none() ); } @@ -303,7 +303,7 @@ module 0xcafe::deflation_token_tests { &creator_ref, option::some(withdraw), option::none(), - option::none(), + option::none() ); } diff --git a/aptos-move/framework/aptos-framework/tests/delegation_pool_integration_tests.move b/aptos-move/framework/aptos-framework/tests/delegation_pool_integration_tests.move index 387c46f549513..12b0d219cb96d 100644 --- a/aptos-move/framework/aptos-framework/tests/delegation_pool_integration_tests.move +++ b/aptos-move/framework/aptos-framework/tests/delegation_pool_integration_tests.move @@ -84,6 +84,7 @@ module aptos_framework::delegation_pool_integration_tests { #[test_only] public fun mint_and_add_stake(account: &signer, amount: u64) { + account::create_account_for_test(signer::address_of(account)); stake::mint(account, amount); dp::add_stake(account, dp::get_owned_pool_address(signer::address_of(account)), amount); } diff --git a/aptos-move/framework/aptos-framework/tests/function_info_tests.move b/aptos-move/framework/aptos-framework/tests/function_info_tests.move index 93a7776f5974b..1850930e88487 100644 --- a/aptos-move/framework/aptos-framework/tests/function_info_tests.move +++ b/aptos-move/framework/aptos-framework/tests/function_info_tests.move @@ -45,7 +45,7 @@ module aptos_framework::function_info_tests { } #[test] - #[expected_failure(abort_code = 0x2, location = aptos_framework::function_info)] + #[expected_failure(abort_code = 0x1, location = Self)] fun test_func_type_eq_reject_same_module() { let m2 = string::utf8(b"function_info_tests"); let lhs = function_info::new_function_info_from_address(@aptos_framework, m2, string::utf8(b"lhs")); diff --git a/aptos-move/framework/aptos-framework/tests/native_disaptch_token_tests.move b/aptos-move/framework/aptos-framework/tests/native_disaptch_token_tests.move new file mode 100644 index 0000000000000..6b942f1078659 --- /dev/null +++ b/aptos-move/framework/aptos-framework/tests/native_disaptch_token_tests.move @@ -0,0 +1,16 @@ +#[test_only] +module aptos_framework::native_dispatch_token_tests { + use aptos_framework::fungible_asset; + use 0xcafe::native_dispatch_token; + + #[test(creator = @0xcafe)] + #[expected_failure(abort_code=0x10019, location=aptos_framework::fungible_asset)] + fun test_native_dispatch_token( + creator: &signer, + ) { + let (creator_ref, _) = fungible_asset::create_test_token(creator); + fungible_asset::init_test_metadata(&creator_ref); + + native_dispatch_token::initialize(creator, &creator_ref); + } +} diff --git 
a/aptos-move/framework/aptos-framework/tests/native_dispatch_token.move b/aptos-move/framework/aptos-framework/tests/native_dispatch_token.move new file mode 100644 index 0000000000000..51e7ef601c2fe --- /dev/null +++ b/aptos-move/framework/aptos-framework/tests/native_dispatch_token.move @@ -0,0 +1,33 @@ +#[test_only] +module 0xcafe::native_dispatch_token { + use aptos_framework::fungible_asset::{FungibleAsset, TransferRef}; + use aptos_framework::dispatchable_fungible_asset; + use aptos_framework::object::{ConstructorRef, Object}; + use aptos_framework::function_info; + + use std::option; + use std::signer; + use std::string; + + public fun initialize(account: &signer, constructor_ref: &ConstructorRef) { + assert!(signer::address_of(account) == @0xcafe, 1); + let withdraw = function_info::new_function_info( + account, + string::utf8(b"native_dispatch_token"), + string::utf8(b"withdraw"), + ); + + dispatchable_fungible_asset::register_dispatch_functions( + constructor_ref, + option::some(withdraw), + option::none(), + option::none(), + ); + } + + public native fun withdraw( + store: Object, + _amount: u64, + transfer_ref: &TransferRef, + ): FungibleAsset; +} diff --git a/aptos-move/framework/aptos-framework/tests/nil_op_token.move b/aptos-move/framework/aptos-framework/tests/nil_op_token.move index 6a4a852e19c78..9cdf7b13a6de2 100644 --- a/aptos-move/framework/aptos-framework/tests/nil_op_token.move +++ b/aptos-move/framework/aptos-framework/tests/nil_op_token.move @@ -21,7 +21,7 @@ module 0xcafe::nil_op_token { constructor_ref, option::some(withdraw), option::none(), - option::none() + option::none(), ); } diff --git a/aptos-move/framework/aptos-framework/tests/nil_op_token_tests.move b/aptos-move/framework/aptos-framework/tests/nil_op_token_tests.move index d4bf6abbdba52..985fab258bc71 100644 --- a/aptos-move/framework/aptos-framework/tests/nil_op_token_tests.move +++ b/aptos-move/framework/aptos-framework/tests/nil_op_token_tests.move @@ -7,7 +7,6 @@ module aptos_framework::nil_op_token_tests { use std::option; #[test(creator = @0xcafe)] - #[expected_failure(abort_code=0x70002, location=aptos_framework::dispatchable_fungible_asset)] fun test_nil_op_token( creator: &signer, ) { @@ -26,7 +25,6 @@ module aptos_framework::nil_op_token_tests { // Deposit will cause an re-entrant call into dispatchable_fungible_asset dispatchable_fungible_asset::deposit(creator_store, fa); - // Withdraw will fail because it's not drawing the basic amount. 
let fa = dispatchable_fungible_asset::withdraw(creator, creator_store, 10); dispatchable_fungible_asset::deposit(creator_store, fa); } diff --git a/aptos-move/framework/aptos-framework/tests/permissioned_signer_tests.move b/aptos-move/framework/aptos-framework/tests/permissioned_signer_tests.move new file mode 100644 index 0000000000000..ec8393fd07ae0 --- /dev/null +++ b/aptos-move/framework/aptos-framework/tests/permissioned_signer_tests.move @@ -0,0 +1,381 @@ +#[test_only] +module aptos_framework::permissioned_signer_tests { + use std::bcs; + use std::features; + use aptos_framework::account::create_signer_for_test; + use aptos_framework::permissioned_signer; + use aptos_framework::timestamp; + use std::option; + use std::signer; + + struct OnePermission has copy, drop, store {} + + struct AddressPermission has copy, drop, store { + addr: address + } + + #[test(creator = @0xcafe)] + fun test_permission_e2e(creator: &signer) { + let aptos_framework = create_signer_for_test(@0x1); + timestamp::set_time_has_started_for_testing(&aptos_framework); + + let perm_handle = permissioned_signer::create_permissioned_handle(creator); + let perm_signer = permissioned_signer::signer_from_permissioned_handle(&perm_handle); + + assert!(permissioned_signer::is_permissioned_signer(&perm_signer), 1); + assert!(!permissioned_signer::is_permissioned_signer(creator), 1); + assert!(signer::address_of(&perm_signer) == signer::address_of(creator), 1); + + permissioned_signer::authorize_increase(creator, &perm_signer, 100, OnePermission {}); + assert!( + permissioned_signer::capacity(&perm_signer, OnePermission {}) + == option::some(100), + 1 + ); + + assert!( + permissioned_signer::check_permission_consume( + &perm_signer, 10, OnePermission {} + ), + 1 + ); + assert!( + permissioned_signer::capacity(&perm_signer, OnePermission {}) + == option::some(90), + 1 + ); + + permissioned_signer::authorize_increase( + creator, + &perm_signer, + 5, + AddressPermission { addr: @0x1 } + ); + + assert!( + permissioned_signer::capacity(&perm_signer, AddressPermission { addr: @0x1 }) + == option::some(5), + 1 + ); + assert!( + permissioned_signer::capacity(&perm_signer, AddressPermission { addr: @0x2 }) + == option::none(), + 1 + ); + + // Not enough capacity, check permission should return false + assert!( + !permissioned_signer::check_permission_consume( + &perm_signer, 10, AddressPermission { addr: @0x1 } + ), + 1 + ); + + assert!( + permissioned_signer::check_permission_consume( + &perm_signer, 5, AddressPermission { addr: @0x1 } + ), + 1 + ); + + // Remaining capacity is 0, should be viewed as non-exist. 
+ assert!( + !permissioned_signer::check_permission_exists( + &perm_signer, AddressPermission { addr: @0x1 } + ), + 1 + ); + + permissioned_signer::revoke_permission(&perm_signer, OnePermission {}); + assert!( + permissioned_signer::capacity(&perm_signer, OnePermission {}) + == option::none(), + 1 + ); + + permissioned_signer::destroy_permissioned_handle(perm_handle); + } + + #[test(creator = @0xcafe)] + fun test_storable_permission_e2e(creator: &signer) { + let aptos_framework = create_signer_for_test(@0x1); + timestamp::set_time_has_started_for_testing(&aptos_framework); + + let perm_handle = + permissioned_signer::create_storable_permissioned_handle(creator, 60); + let perm_signer = + permissioned_signer::signer_from_storable_permissioned_handle(&perm_handle); + + assert!(permissioned_signer::is_permissioned_signer(&perm_signer), 1); + assert!(!permissioned_signer::is_permissioned_signer(creator), 1); + assert!(signer::address_of(&perm_signer) == signer::address_of(creator), 1); + + permissioned_signer::authorize_increase(creator, &perm_signer, 100, OnePermission {}); + assert!( + permissioned_signer::capacity(&perm_signer, OnePermission {}) + == option::some(100), + 1 + ); + + assert!( + permissioned_signer::check_permission_consume( + &perm_signer, 10, OnePermission {} + ), + 1 + ); + assert!( + permissioned_signer::capacity(&perm_signer, OnePermission {}) + == option::some(90), + 1 + ); + + permissioned_signer::authorize_increase( + creator, + &perm_signer, + 5, + AddressPermission { addr: @0x1 } + ); + + assert!( + permissioned_signer::capacity(&perm_signer, AddressPermission { addr: @0x1 }) + == option::some(5), + 1 + ); + assert!( + permissioned_signer::capacity(&perm_signer, AddressPermission { addr: @0x2 }) + == option::none(), + 1 + ); + + // Not enough capacity, check permission should return false + assert!( + !permissioned_signer::check_permission_consume( + &perm_signer, 10, AddressPermission { addr: @0x1 } + ), + 1 + ); + + permissioned_signer::revoke_permission(&perm_signer, OnePermission {}); + assert!( + permissioned_signer::capacity(&perm_signer, OnePermission {}) + == option::none(), + 1 + ); + + permissioned_signer::destroy_storable_permissioned_handle(perm_handle); + } + + #[test(creator = @0xcafe)] + #[expected_failure( + abort_code = 0x50005, location = aptos_framework::permissioned_signer + )] + fun test_permission_expiration(creator: &signer) { + let aptos_framework = create_signer_for_test(@0x1); + timestamp::set_time_has_started_for_testing(&aptos_framework); + + let perm_handle = + permissioned_signer::create_storable_permissioned_handle(creator, 60); + let _perm_signer = + permissioned_signer::signer_from_storable_permissioned_handle(&perm_handle); + + timestamp::fast_forward_seconds(60); + let _perm_signer = + permissioned_signer::signer_from_storable_permissioned_handle(&perm_handle); + + permissioned_signer::destroy_storable_permissioned_handle(perm_handle); + } + + // invalid authorization + // 1. master signer is a permissioned signer + // 2. permissioned signer is a master signer + // 3. 
permissioned and main signer address mismatch + #[test(creator = @0xcafe)] + #[expected_failure( + abort_code = 0x50002, location = aptos_framework::permissioned_signer + )] + fun test_auth_1(creator: &signer) { + let aptos_framework = create_signer_for_test(@0x1); + timestamp::set_time_has_started_for_testing(&aptos_framework); + + let perm_handle = permissioned_signer::create_permissioned_handle(creator); + let perm_signer = permissioned_signer::signer_from_permissioned_handle(&perm_handle); + + permissioned_signer::authorize_increase( + &perm_signer, + &perm_signer, + 100, + OnePermission {} + ); + permissioned_signer::destroy_permissioned_handle(perm_handle); + } + + #[test(creator = @0xcafe)] + #[expected_failure( + abort_code = 0x50002, location = aptos_framework::permissioned_signer + )] + fun test_auth_2(creator: &signer) { + permissioned_signer::authorize_increase(creator, creator, 100, OnePermission {}); + } + + #[test(creator = @0xcafe, creator2 = @0xbeef)] + #[expected_failure( + abort_code = 0x50002, location = aptos_framework::permissioned_signer + )] + fun test_auth_3(creator: &signer, creator2: &signer) { + let aptos_framework = create_signer_for_test(@0x1); + timestamp::set_time_has_started_for_testing(&aptos_framework); + + let perm_handle = permissioned_signer::create_permissioned_handle(creator); + let perm_signer = permissioned_signer::signer_from_permissioned_handle(&perm_handle); + + permissioned_signer::authorize_increase(creator2, &perm_signer, 100, OnePermission {}); + permissioned_signer::destroy_permissioned_handle(perm_handle); + } + + // Accessing capacity on a master signer + #[test(creator = @0xcafe)] + fun test_invalid_capacity(creator: &signer) { + assert!( + permissioned_signer::capacity(creator, OnePermission {}) + == option::some( + 115792089237316195423570985008687907853269984665640564039457584007913129639935 + ), + 1 + ); + } + + // Making sure master signer always have all permissions even when feature is disabled. + #[test(creator = @aptos_framework)] + fun test_master_signer_permission(creator: &signer) { + assert!( + permissioned_signer::check_permission_exists(creator, OnePermission {}), + 1 + ); + + // Disable the permissioned signer feature. + features::change_feature_flags_for_testing( + creator, + vector[], + vector[features::get_permissioned_signer_feature()] + ); + + // Master signer should still have permission after feature is disabled. 
+ assert!( + permissioned_signer::check_permission_exists(creator, OnePermission {}), + 1 + ); + } + + // creating permission using a permissioned signer + #[test(creator = @0xcafe)] + #[expected_failure( + abort_code = 0x50001, location = aptos_framework::permissioned_signer + )] + fun test_invalid_creation(creator: &signer) { + let aptos_framework = create_signer_for_test(@0x1); + timestamp::set_time_has_started_for_testing(&aptos_framework); + + let perm_handle = permissioned_signer::create_permissioned_handle(creator); + let perm_signer = permissioned_signer::signer_from_permissioned_handle(&perm_handle); + + let perm_handle_2 = permissioned_signer::create_permissioned_handle(&perm_signer); + permissioned_signer::destroy_permissioned_handle(perm_handle); + permissioned_signer::destroy_permissioned_handle(perm_handle_2); + } + + #[test(creator = @0xcafe)] + fun test_permission_revocation_success(creator: &signer) { + let aptos_framework = create_signer_for_test(@0x1); + timestamp::set_time_has_started_for_testing(&aptos_framework); + + let perm_handle = + permissioned_signer::create_storable_permissioned_handle(creator, 60); + let _perm_signer = + permissioned_signer::signer_from_storable_permissioned_handle(&perm_handle); + + permissioned_signer::revoke_permission_storage_address( + creator, permissioned_signer::permissions_storage_address(&perm_handle) + ); + + permissioned_signer::destroy_storable_permissioned_handle(perm_handle); + } + + #[test(creator = @0xcafe)] + fun test_permission_revocation_success_with_permissioned_signer(creator: &signer) { + let aptos_framework = create_signer_for_test(@0x1); + timestamp::set_time_has_started_for_testing(&aptos_framework); + + let perm_handle = + permissioned_signer::create_storable_permissioned_handle(creator, 60); + let perm_signer = + permissioned_signer::signer_from_storable_permissioned_handle(&perm_handle); + + permissioned_signer::grant_revoke_permission(creator, &perm_signer); + + permissioned_signer::revoke_permission_storage_address( + &perm_signer, permissioned_signer::permissions_storage_address(&perm_handle) + ); + + permissioned_signer::destroy_storable_permissioned_handle(perm_handle); + } + + #[test(creator = @0xcafe)] + #[expected_failure( + abort_code = 0x50007, location = aptos_framework::permissioned_signer + )] + fun test_permission_revocation_and_access(creator: &signer) { + let aptos_framework = create_signer_for_test(@0x1); + timestamp::set_time_has_started_for_testing(&aptos_framework); + + let perm_handle = + permissioned_signer::create_storable_permissioned_handle(creator, 60); + let _perm_signer = + permissioned_signer::signer_from_storable_permissioned_handle(&perm_handle); + + permissioned_signer::revoke_permission_storage_address( + creator, permissioned_signer::permissions_storage_address(&perm_handle) + ); + let _perm_signer = + permissioned_signer::signer_from_storable_permissioned_handle(&perm_handle); + + permissioned_signer::destroy_storable_permissioned_handle(perm_handle); + } + + #[test(creator1 = @0xcafe, creator2 = @0xbafe)] + #[expected_failure( + abort_code = 0x50008, location = aptos_framework::permissioned_signer + )] + fun test_permission_revoke_other(creator1: &signer, creator2: &signer) { + let aptos_framework = create_signer_for_test(@0x1); + timestamp::set_time_has_started_for_testing(&aptos_framework); + + let perm_handle_1 = + permissioned_signer::create_storable_permissioned_handle(creator1, 60); + + let perm_handle_2 = + permissioned_signer::create_storable_permissioned_handle(creator2, 
60); + + permissioned_signer::revoke_permission_storage_address( + creator1, permissioned_signer::permissions_storage_address(&perm_handle_2) + ); + + permissioned_signer::destroy_storable_permissioned_handle(perm_handle_1); + permissioned_signer::destroy_storable_permissioned_handle(perm_handle_2); + } + + #[test(creator = @0xcafe)] + #[expected_failure(abort_code = 453, location = std::bcs)] + fun test_permissioned_signer_serialization(creator: &signer) { + let aptos_framework = create_signer_for_test(@0x1); + timestamp::set_time_has_started_for_testing(&aptos_framework); + + let perm_handle = + permissioned_signer::create_storable_permissioned_handle(creator, 60); + let perm_signer = + permissioned_signer::signer_from_storable_permissioned_handle(&perm_handle); + + bcs::to_bytes(&perm_signer); + + permissioned_signer::destroy_storable_permissioned_handle(perm_handle); + } +} diff --git a/aptos-move/framework/aptos-framework/tests/permissioned_token.move b/aptos-move/framework/aptos-framework/tests/permissioned_token.move index fe0ea6d3a7dfc..724ee6b628fac 100644 --- a/aptos-move/framework/aptos-framework/tests/permissioned_token.move +++ b/aptos-move/framework/aptos-framework/tests/permissioned_token.move @@ -31,7 +31,7 @@ module 0xcafe::permissioned_token { constructor_ref, option::some(withdraw), option::none(), - option::none() + option::none(), ); } diff --git a/aptos-move/framework/aptos-framework/tests/simple_dispatchable_token.move b/aptos-move/framework/aptos-framework/tests/simple_dispatchable_token.move index 7c6e05e0fc5ea..3f9057521bcaa 100644 --- a/aptos-move/framework/aptos-framework/tests/simple_dispatchable_token.move +++ b/aptos-move/framework/aptos-framework/tests/simple_dispatchable_token.move @@ -28,7 +28,7 @@ module 0xcafe::simple_token { constructor_ref, option::some(withdraw), option::some(deposit), - option::none(), + option::none() ); } diff --git a/aptos-move/framework/aptos-framework/tests/simple_dispatchable_token_pfs_tests.move b/aptos-move/framework/aptos-framework/tests/simple_dispatchable_token_pfs_tests.move index 1b80c489024e5..296c74c645425 100644 --- a/aptos-move/framework/aptos-framework/tests/simple_dispatchable_token_pfs_tests.move +++ b/aptos-move/framework/aptos-framework/tests/simple_dispatchable_token_pfs_tests.move @@ -28,7 +28,7 @@ module aptos_framework::simple_token_pfs_tests { // User 2 burns their primary store but should still be able to transfer afterward. let user_2_primary_store = primary_store(user_2_address, metadata); - object::burn(user_2, user_2_primary_store); + object::burn_object_with_transfer(user_2, user_2_primary_store); assert!(object::is_burnt(user_2_primary_store), 0); // Balance still works assert!(balance(user_2_address, metadata) == 80, 0); @@ -54,7 +54,7 @@ module aptos_framework::simple_token_pfs_tests { // User 2 burns their primary store but should still be able to withdraw afterward. 
let user_2_primary_store = primary_store(user_2_address, metadata); - object::burn(user_2, user_2_primary_store); + object::burn_object_with_transfer(user_2, user_2_primary_store); assert!(object::is_burnt(user_2_primary_store), 0); let coins = withdraw(user_2, metadata, 70); assert!(balance(user_2_address, metadata) == 10, 0); diff --git a/aptos-move/framework/aptos-framework/tests/ten_x_token.move b/aptos-move/framework/aptos-framework/tests/ten_x_token.move index 7db59d838c9fc..42799fdfa3efb 100644 --- a/aptos-move/framework/aptos-framework/tests/ten_x_token.move +++ b/aptos-move/framework/aptos-framework/tests/ten_x_token.move @@ -1,31 +1,90 @@ #[test_only] module 0xcafe::ten_x_token { - use aptos_framework::fungible_asset; + use aptos_framework::fungible_asset::{Self, FungibleAsset, RawBalanceRef, RawSupplyRef, TransferRef}; use aptos_framework::dispatchable_fungible_asset; use aptos_framework::object::{ConstructorRef, Object}; use aptos_framework::function_info; use std::option; + use std::option::Option; use std::signer; use std::string; + struct BalanceStore has key { + balance_ref: RawBalanceRef, + supply_ref: RawSupplyRef, + } + public fun initialize(account: &signer, constructor_ref: &ConstructorRef) { assert!(signer::address_of(account) == @0xcafe, 1); - let value = function_info::new_function_info( + let balance_ref = fungible_asset::generate_raw_balance_ref(constructor_ref); + let supply_ref = fungible_asset::generate_raw_supply_ref(constructor_ref); + move_to(account, BalanceStore { balance_ref, supply_ref }); + + let balance_value = function_info::new_function_info( account, string::utf8(b"ten_x_token"), string::utf8(b"derived_balance"), ); + let supply_value = function_info::new_function_info( + account, + string::utf8(b"ten_x_token"), + string::utf8(b"derived_supply"), + ); + + let withdraw = function_info::new_function_info( + account, + string::utf8(b"ten_x_token"), + string::utf8(b"withdraw"), + ); + + let deposit = function_info::new_function_info( + account, + string::utf8(b"ten_x_token"), + string::utf8(b"deposit"), + ); + dispatchable_fungible_asset::register_dispatch_functions( constructor_ref, - option::none(), - option::none(), - option::some(value) + option::some(withdraw), + option::some(deposit), + option::some(balance_value) + ); + dispatchable_fungible_asset::register_derive_supply_dispatch_function( + constructor_ref, + option::some(supply_value) ); } - public fun derived_balance(store: Object): u64 { + public fun derived_balance(store: Object): u64 acquires BalanceStore { // Derived value is always 10x! - fungible_asset::balance(store) * 10 + fungible_asset::balance_with_ref( + &borrow_global(@0xcafe).balance_ref, + store + ) * 10 + } + + public fun derived_supply(metadata: Object): Option acquires BalanceStore { + // Derived supply is 10x. 
+ option::some(option::extract(&mut fungible_asset::supply_with_ref( + &borrow_global(@0xcafe).supply_ref, + metadata + )) * 10) + } + + public fun withdraw( + store: Object, + amount: u64, + transfer_ref: &TransferRef, + ): FungibleAsset { + fungible_asset::withdraw_with_ref(transfer_ref, store, amount) + } + + public fun deposit( + store: Object, + fa: FungibleAsset, + transfer_ref: &TransferRef, + ) { + fungible_asset::deposit_with_ref(transfer_ref, store, fa) } } diff --git a/aptos-move/framework/aptos-framework/tests/ten_x_token_tests.move b/aptos-move/framework/aptos-framework/tests/ten_x_token_tests.move index d62278c6c54d0..d533244d58129 100644 --- a/aptos-move/framework/aptos-framework/tests/ten_x_token_tests.move +++ b/aptos-move/framework/aptos-framework/tests/ten_x_token_tests.move @@ -2,9 +2,11 @@ module aptos_framework::ten_x_token_tests { use aptos_framework::fungible_asset::{Self, Metadata, TestToken}; use aptos_framework::dispatchable_fungible_asset; - use 0xcafe::ten_x_token; + use aptos_framework::primary_fungible_store; use aptos_framework::object; + use 0xcafe::ten_x_token; use std::option; + use std::signer; #[test(creator = @0xcafe)] fun test_ten_x( @@ -18,14 +20,63 @@ module aptos_framework::ten_x_token_tests { ten_x_token::initialize(creator, &creator_ref); - assert!(fungible_asset::supply(metadata) == option::some(0), 1); + assert!(dispatchable_fungible_asset::derived_supply(metadata) == option::some(0), 2); // Mint let fa = fungible_asset::mint(&mint, 100); - assert!(fungible_asset::supply(metadata) == option::some(100), 2); - // Deposit will cause an re-entrant call into dispatchable_fungible_asset dispatchable_fungible_asset::deposit(creator_store, fa); // The derived value is 10x - assert!(dispatchable_fungible_asset::derived_balance(creator_store) == 1000, 5); + assert!(dispatchable_fungible_asset::derived_balance(creator_store) == 1000, 4); + + // The derived supply is 10x + assert!(dispatchable_fungible_asset::derived_supply(metadata) == option::some(1000), 5); + } + + #[test(creator = @0xcafe)] + fun test_ten_x_pfs( + creator: &signer, + ) { + let (creator_ref, token_object) = fungible_asset::create_test_token(creator); + let (mint, _, _) = primary_fungible_store::init_test_metadata_with_primary_store_enabled(&creator_ref); + let metadata = object::convert(token_object); + + ten_x_token::initialize(creator, &creator_ref); + let creator_address = signer::address_of(creator); + + let fa = fungible_asset::mint(&mint, 100); + primary_fungible_store::deposit(creator_address, fa); + + // The derived value is 10x + assert!(primary_fungible_store::balance(creator_address, metadata) == 1000, 4); + assert!(primary_fungible_store::is_balance_at_least(creator_address, metadata, 1000), 4); + } + + #[test(creator = @0xcafe)] + #[expected_failure(abort_code=0x1001C, location=aptos_framework::fungible_asset)] + fun ten_x_balance_abort( + creator: &signer, + ) { + let (creator_ref, token_object) = fungible_asset::create_test_token(creator); + fungible_asset::init_test_metadata(&creator_ref); + let metadata = object::convert(token_object); + + let creator_store = fungible_asset::create_test_store(creator, metadata); + + ten_x_token::initialize(creator, &creator_ref); + assert!(fungible_asset::balance(creator_store) == 0, 1); + } + + #[test(creator = @0xcafe)] + #[expected_failure(abort_code=0x1001C, location=aptos_framework::fungible_asset)] + fun ten_x_supply_abort( + creator: &signer, + ) { + let (creator_ref, token_object) = fungible_asset::create_test_token(creator); + 
fungible_asset::init_test_metadata(&creator_ref); + let metadata = object::convert(token_object); + + ten_x_token::initialize(creator, &creator_ref); + + assert!(fungible_asset::supply(metadata) == option::some(0), 2); } } diff --git a/aptos-move/framework/aptos-stdlib/doc/any.md b/aptos-move/framework/aptos-stdlib/doc/any.md index e8198a8d9bdbd..1a4704525137b 100644 --- a/aptos-move/framework/aptos-stdlib/doc/any.md +++ b/aptos-move/framework/aptos-stdlib/doc/any.md @@ -120,7 +120,7 @@ also required from T. Unpack a value from the Any representation. This aborts if the value has not the expected type T. -
public fun unpack<T>(x: any::Any): T
+
public fun unpack<T>(self: any::Any): T
 
@@ -129,9 +129,9 @@ Unpack a value from the Any repres Implementation -
public fun unpack<T>(x: Any): T {
-    assert!(type_info::type_name<T>() == x.type_name, error::invalid_argument(ETYPE_MISMATCH));
-    from_bytes<T>(x.data)
+
public fun unpack<T>(self: Any): T {
+    assert!(type_info::type_name<T>() == self.type_name, error::invalid_argument(ETYPE_MISMATCH));
+    from_bytes<T>(self.data)
 }
 
@@ -146,7 +146,7 @@ Unpack a value from the Any repres Returns the type name of this Any -
public fun type_name(x: &any::Any): &string::String
+
public fun type_name(self: &any::Any): &string::String
 
@@ -155,8 +155,8 @@ Returns the type name of this Any Implementation -
public fun type_name(x: &Any): &String {
-    &x.type_name
+
public fun type_name(self: &Any): &String {
+    &self.type_name
 }
 
@@ -195,14 +195,14 @@ Returns the type name of this Any ### Function `unpack` -
public fun unpack<T>(x: any::Any): T
+
public fun unpack<T>(self: any::Any): T
 
include UnpackAbortsIf<T>;
-ensures result == from_bcs::deserialize<T>(x.data);
+ensures result == from_bcs::deserialize<T>(self.data);
 
@@ -212,9 +212,9 @@ Returns the type name of this Any
schema UnpackAbortsIf<T> {
-    x: Any;
-    aborts_if type_info::type_name<T>() != x.type_name;
-    aborts_if !from_bcs::deserializable<T>(x.data);
+    self: Any;
+    aborts_if type_info::type_name<T>() != self.type_name;
+    aborts_if !from_bcs::deserializable<T>(self.data);
 }
 
@@ -225,9 +225,9 @@ Returns the type name of this Any
schema UnpackRequirement<T> {
-    x: Any;
-    requires type_info::type_name<T>() == x.type_name;
-    requires from_bcs::deserializable<T>(x.data);
+    self: Any;
+    requires type_info::type_name<T>() == self.type_name;
+    requires from_bcs::deserializable<T>(self.data);
 }
 
@@ -238,14 +238,14 @@ Returns the type name of this Any ### Function `type_name` -
public fun type_name(x: &any::Any): &string::String
+
public fun type_name(self: &any::Any): &string::String
 
aborts_if false;
-ensures result == x.type_name;
+ensures result == self.type_name;
 
diff --git a/aptos-move/framework/aptos-stdlib/doc/base16.md b/aptos-move/framework/aptos-stdlib/doc/base16.md new file mode 100644 index 0000000000000..26085a4e1d6ad --- /dev/null +++ b/aptos-move/framework/aptos-stdlib/doc/base16.md @@ -0,0 +1,113 @@ + + + +# Module `0x1::base16` + + + +- [Function `hex_char_to_u8`](#0x1_base16_hex_char_to_u8) +- [Function `base16_utf8_to_vec_u8`](#0x1_base16_base16_utf8_to_vec_u8) +- [Specification](#@Specification_0) + - [Function `base16_utf8_to_vec_u8`](#@Specification_0_base16_utf8_to_vec_u8) + + +
+ + + + + +## Function `hex_char_to_u8` + + + +
public fun hex_char_to_u8(c: u8): u8
+
+ + + +
+Implementation + + +
public fun hex_char_to_u8(c: u8): u8 {
+    if (c >= 48 && c <= 57) {  // '0' to '9'
+        c - 48
+    } else if (c >= 65 && c <= 70) { // 'A' to 'F'
+        c - 55
+    } else if (c >= 97 && c <= 102) { // 'a' to 'f'
+        c - 87
+    } else {
+        abort 1
+    }
+}
+
+ + + +
+ + + +## Function `base16_utf8_to_vec_u8` + + + +
public fun base16_utf8_to_vec_u8(str: vector<u8>): vector<u8>
+
+ + + +
+Implementation + + +
public fun base16_utf8_to_vec_u8(str: vector<u8>): vector<u8> {
+    let result = vector::empty<u8>();
+    let i = 0;
+    while (i < vector::length(&str)) {
+        let c1 = vector::borrow(&str, i);
+        let c2 = vector::borrow(&str, i + 1);
+        let byte = hex_char_to_u8(*c1) << 4 | hex_char_to_u8(*c2);
+        vector::push_back(&mut result, byte);
+        i = i + 2;
+    };
+    result
+}
+
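A minimal usage sketch (hypothetical test code, not part of this module, assuming use aptos_std::base16): decoding the ASCII hex string "0aff" yields the two bytes 0x0a and 0xff.

#[test]
fun decode_hex_pair() {
    // '0','a' -> 0x0a and 'f','f' -> 0xff; x"0aff" is the expected byte vector.
    let bytes = base16::base16_utf8_to_vec_u8(b"0aff");
    assert!(bytes == x"0aff", 1);
}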
+ + + +
+ + + +## Specification + + + + +### Function `base16_utf8_to_vec_u8` + + +
public fun base16_utf8_to_vec_u8(str: vector<u8>): vector<u8>
+
+ + + + +
pragma opaque;
+ensures [abstract] result == spec_base16_utf8_to_vec_u8(str);
+
+ + + + + + + +
fun spec_base16_utf8_to_vec_u8(str: vector<u8>): vector<u8>;
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/aptos-stdlib/doc/bcs_stream.md b/aptos-move/framework/aptos-stdlib/doc/bcs_stream.md new file mode 100644 index 0000000000000..f41d26c151cfe --- /dev/null +++ b/aptos-move/framework/aptos-stdlib/doc/bcs_stream.md @@ -0,0 +1,685 @@ + + + +# Module `0x1::bcs_stream` + +This module enables the deserialization of BCS-formatted byte arrays into Move primitive types. +Deserialization Strategies: +- Per-Byte Deserialization: Employed for most types to ensure lower gas consumption, this method processes each byte +individually to match the length and type requirements of target Move types. +- Exception: For the deserialize_address function, the function-based approach from aptos_std::from_bcs is used +due to type constraints, even though it is generally more gas-intensive. +- This can be optimized further by introducing native vector slices. +Application: +- This deserializer is particularly valuable for processing BCS serialized data within Move modules, +especially useful for systems requiring cross-chain message interpretation or off-chain data verification. + + +- [Struct `BCSStream`](#0x1_bcs_stream_BCSStream) +- [Constants](#@Constants_0) +- [Function `new`](#0x1_bcs_stream_new) +- [Function `has_remaining`](#0x1_bcs_stream_has_remaining) +- [Function `deserialize_uleb128`](#0x1_bcs_stream_deserialize_uleb128) +- [Function `deserialize_bool`](#0x1_bcs_stream_deserialize_bool) +- [Function `deserialize_address`](#0x1_bcs_stream_deserialize_address) +- [Function `deserialize_u8`](#0x1_bcs_stream_deserialize_u8) +- [Function `deserialize_u16`](#0x1_bcs_stream_deserialize_u16) +- [Function `deserialize_u32`](#0x1_bcs_stream_deserialize_u32) +- [Function `deserialize_u64`](#0x1_bcs_stream_deserialize_u64) +- [Function `deserialize_u128`](#0x1_bcs_stream_deserialize_u128) +- [Function `deserialize_u256`](#0x1_bcs_stream_deserialize_u256) +- [Function `deserialize_u256_entry`](#0x1_bcs_stream_deserialize_u256_entry) +- [Function `deserialize_vector`](#0x1_bcs_stream_deserialize_vector) +- [Function `deserialize_string`](#0x1_bcs_stream_deserialize_string) +- [Function `deserialize_option`](#0x1_bcs_stream_deserialize_option) +- [Specification](#@Specification_1) + + +
use 0x1::error;
+use 0x1::from_bcs;
+use 0x1::string;
+use 0x1::vector;
+
+ + + + + +## Struct `BCSStream` + + + +
struct BCSStream has drop
+
+ + + +
+Fields + + +
+
+data: vector<u8> +
+
+ Byte buffer containing the serialized data. +
+
+cur: u64 +
+
+ Cursor indicating the current position in the byte buffer. +
+
+ + +
+ + + +## Constants + + + + +The data does not fit the expected format. + + +
const EMALFORMED_DATA: u64 = 1;
+
+ + + + + +There are not enough bytes to deserialize for the given type. + + +
const EOUT_OF_BYTES: u64 = 2;
+
+ + + + + +## Function `new` + +Constructs a new BCSStream instance from the provided byte array. + + +
public fun new(data: vector<u8>): bcs_stream::BCSStream
+
+ + + +
+Implementation + + +
public fun new(data: vector<u8>): BCSStream {
+    BCSStream {
+        data,
+        cur: 0,
+    }
+}
+
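A rough end-to-end sketch (hypothetical test code, assuming use std::bcs and use aptos_std::bcs_stream): a value written with std::bcs can be read back through a stream.

#[test]
fun roundtrip_u64() {
    // 42u64 serializes to 8 little-endian bytes; the stream consumes all of them.
    let stream = bcs_stream::new(bcs::to_bytes(&42u64));
    assert!(bcs_stream::deserialize_u64(&mut stream) == 42, 1);
    assert!(!bcs_stream::has_remaining(&mut stream), 2);
}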
+ + + +
+ + + +## Function `has_remaining` + + + +
public fun has_remaining(stream: &mut bcs_stream::BCSStream): bool
+
+ + + +
+Implementation + + +
public fun has_remaining(stream: &mut BCSStream): bool {
+    stream.cur < stream.data.length()
+}
+
+ + + +
+ + + +## Function `deserialize_uleb128` + +Deserializes a ULEB128-encoded integer from the stream. +In the BCS format, lengths of vectors are represented using ULEB128 encoding. + + +
public fun deserialize_uleb128(stream: &mut bcs_stream::BCSStream): u64
+
+ + + +
+Implementation + + +
public fun deserialize_uleb128(stream: &mut BCSStream): u64 {
+    let res = 0;
+    let shift = 0;
+
+    while (stream.cur < stream.data.length()) {
+        let byte = stream.data[stream.cur];
+        stream.cur += 1;
+
+        let val = ((byte & 0x7f) as u64);
+        if (((val << shift) >> shift) != val) {
+            abort error::invalid_argument(EMALFORMED_DATA)
+        };
+        res |= (val << shift);
+
+        if ((byte & 0x80) == 0) {
+            if (shift > 0 && val == 0) {
+                abort error::invalid_argument(EMALFORMED_DATA)
+            };
+            return res
+        };
+
+        shift += 7;
+        if (shift > 64) {
+            abort error::invalid_argument(EMALFORMED_DATA)
+        };
+    };
+
+    abort error::out_of_range(EOUT_OF_BYTES)
+}
+
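As a worked example (hypothetical snippet, assuming bcs_stream is in scope): the two-byte sequence 0x80 0x01 decodes to 128, since 0x80 contributes the low 7 bits (all zero) with the continuation bit set and 0x01 contributes 1 << 7.

// ULEB128: [0x80, 0x01] encodes 128.
let stream = bcs_stream::new(vector[0x80, 0x01]);
assert!(bcs_stream::deserialize_uleb128(&mut stream) == 128, 1);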
+ + + +
+ + + +## Function `deserialize_bool` + +Deserializes a bool value from the stream. + + +
public fun deserialize_bool(stream: &mut bcs_stream::BCSStream): bool
+
+ + + +
+Implementation + + +
public fun deserialize_bool(stream: &mut BCSStream): bool {
+    assert!(stream.cur < stream.data.length(), error::out_of_range(EOUT_OF_BYTES));
+    let byte = stream.data[stream.cur];
+    stream.cur += 1;
+    if (byte == 0) {
+        false
+    } else if (byte == 1) {
+        true
+    } else {
+        abort error::invalid_argument(EMALFORMED_DATA)
+    }
+}
+
+ + + +
+ + + +## Function `deserialize_address` + +Deserializes an address value from the stream. +32-byte address values are serialized using little-endian byte order. +This function utilizes the to_address function from the aptos_std::from_bcs module, +because the Move type system does not permit per-byte referencing of addresses. + + +
public fun deserialize_address(stream: &mut bcs_stream::BCSStream): address
+
+ + + +
+Implementation + + +
public fun deserialize_address(stream: &mut BCSStream): address {
+    let data = &stream.data;
+    let cur = stream.cur;
+
+    assert!(cur + 32 <= data.length(), error::out_of_range(EOUT_OF_BYTES));
+    let res = from_bcs::to_address(data.slice(cur, cur + 32));
+
+    stream.cur = cur + 32;
+    res
+}
+
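A round trip through std::bcs illustrates this (hypothetical snippet; @0xcafe is an arbitrary example address and std::bcs is assumed in scope):

// An address serializes to exactly 32 bytes, which deserialize_address consumes.
let stream = bcs_stream::new(bcs::to_bytes(&@0xcafe));
assert!(bcs_stream::deserialize_address(&mut stream) == @0xcafe, 1);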
+ + + +
+ + + +## Function `deserialize_u8` + +Deserializes a u8 value from the stream. +1-byte u8 values are serialized using little-endian byte order. + + +
public fun deserialize_u8(stream: &mut bcs_stream::BCSStream): u8
+
+ + + +
+Implementation + + +
public fun deserialize_u8(stream: &mut BCSStream): u8 {
+    let data = &stream.data;
+    let cur = stream.cur;
+
+    assert!(cur < data.length(), error::out_of_range(EOUT_OF_BYTES));
+
+    let res = data[cur];
+
+    stream.cur = cur + 1;
+    res
+}
+
+ + + +
+ + + +## Function `deserialize_u16` + +Deserializes a u16 value from the stream. +2-byte u16 values are serialized using little-endian byte order. + + +
public fun deserialize_u16(stream: &mut bcs_stream::BCSStream): u16
+
+ + + +
+Implementation + + +
public fun deserialize_u16(stream: &mut BCSStream): u16 {
+    let data = &stream.data;
+    let cur = stream.cur;
+
+    assert!(cur + 2 <= data.length(), error::out_of_range(EOUT_OF_BYTES));
+    let res =
+        (data[cur] as u16) |
+            ((data[cur + 1] as u16) << 8)
+    ;
+
+    stream.cur += 2;
+    res
+}
+
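For example (hypothetical snippet), the byte sequence 0x01 0x02 decodes to 0x0201 = 513, because the first byte is the least significant:

let stream = bcs_stream::new(vector[0x01, 0x02]);
assert!(bcs_stream::deserialize_u16(&mut stream) == 0x0201, 1);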
+ + + +
+ + + +## Function `deserialize_u32` + +Deserializes a u32 value from the stream. +4-byte u32 values are serialized using little-endian byte order. + + +
public fun deserialize_u32(stream: &mut bcs_stream::BCSStream): u32
+
+ + + +
+Implementation + + +
public fun deserialize_u32(stream: &mut BCSStream): u32 {
+    let data = &stream.data;
+    let cur = stream.cur;
+
+    assert!(cur + 4 <= data.length(), error::out_of_range(EOUT_OF_BYTES));
+    let res =
+        (data[cur] as u32) |
+            ((data[cur + 1] as u32) << 8) |
+            ((data[cur + 2] as u32) << 16) |
+            ((data[cur + 3] as u32) << 24)
+    ;
+
+    stream.cur += 4;
+    res
+}
+
+ + + +
+ + + +## Function `deserialize_u64` + +Deserializes a u64 value from the stream. +8-byte u64 values are serialized using little-endian byte order. + + +
public fun deserialize_u64(stream: &mut bcs_stream::BCSStream): u64
+
+ + + +
+Implementation + + +
public fun deserialize_u64(stream: &mut BCSStream): u64 {
+    let data = &stream.data;
+    let cur = stream.cur;
+
+    assert!(cur + 8 <= data.length(), error::out_of_range(EOUT_OF_BYTES));
+    let res =
+        (data[cur] as u64) |
+            ((data[cur + 1] as u64) << 8) |
+            ((data[cur + 2] as u64) << 16) |
+            ((data[cur + 3] as u64) << 24) |
+            ((data[cur + 4] as u64) << 32) |
+            ((data[cur + 5] as u64) << 40) |
+            ((data[cur + 6] as u64) << 48) |
+            ((data[cur + 7] as u64) << 56)
+    ;
+
+    stream.cur += 8;
+    res
+}
+
+ + + +
+ + + +## Function `deserialize_u128` + +Deserializes a u128 value from the stream. +16-byte u128 values are serialized using little-endian byte order. + + +
public fun deserialize_u128(stream: &mut bcs_stream::BCSStream): u128
+
+ + + +
+Implementation + + +
public fun deserialize_u128(stream: &mut BCSStream): u128 {
+    let data = &stream.data;
+    let cur = stream.cur;
+
+    assert!(cur + 16 <= data.length(), error::out_of_range(EOUT_OF_BYTES));
+    let res =
+        (data[cur] as u128) |
+            ((data[cur + 1] as u128) << 8) |
+            ((data[cur + 2] as u128) << 16) |
+            ((data[cur + 3] as u128) << 24) |
+            ((data[cur + 4] as u128) << 32) |
+            ((data[cur + 5] as u128) << 40) |
+            ((data[cur + 6] as u128) << 48) |
+            ((data[cur + 7] as u128) << 56) |
+            ((data[cur + 8] as u128) << 64) |
+            ((data[cur + 9] as u128) << 72) |
+            ((data[cur + 10] as u128) << 80) |
+            ((data[cur + 11] as u128) << 88) |
+            ((data[cur + 12] as u128) << 96) |
+            ((data[cur + 13] as u128) << 104) |
+            ((data[cur + 14] as u128) << 112) |
+            ((data[cur + 15] as u128) << 120)
+    ;
+
+    stream.cur += 16;
+    res
+}
+
+ + + +
+ + + +## Function `deserialize_u256` + +Deserializes a u256 value from the stream. +32-byte u256 values are serialized using little-endian byte order. + + +
public fun deserialize_u256(stream: &mut bcs_stream::BCSStream): u256
+
+ + + +
+Implementation + + +
public fun deserialize_u256(stream: &mut BCSStream): u256 {
+    let data = &stream.data;
+    let cur = stream.cur;
+
+    assert!(cur + 32 <= data.length(), error::out_of_range(EOUT_OF_BYTES));
+    let res =
+        (data[cur] as u256) |
+            ((data[cur + 1] as u256) << 8) |
+            ((data[cur + 2] as u256) << 16) |
+            ((data[cur + 3] as u256) << 24) |
+            ((data[cur + 4] as u256) << 32) |
+            ((data[cur + 5] as u256) << 40) |
+            ((data[cur + 6] as u256) << 48) |
+            ((data[cur + 7] as u256) << 56) |
+            ((data[cur + 8] as u256) << 64) |
+            ((data[cur + 9] as u256) << 72) |
+            ((data[cur + 10] as u256) << 80) |
+            ((data[cur + 11] as u256) << 88) |
+            ((data[cur + 12] as u256) << 96) |
+            ((data[cur + 13] as u256) << 104) |
+            ((data[cur + 14] as u256) << 112) |
+            ((data[cur + 15] as u256) << 120) |
+            ((data[cur + 16] as u256) << 128) |
+            ((data[cur + 17] as u256) << 136) |
+            ((data[cur + 18] as u256) << 144) |
+            ((data[cur + 19] as u256) << 152) |
+            ((data[cur + 20] as u256) << 160) |
+            ((data[cur + 21] as u256) << 168) |
+            ((data[cur + 22] as u256) << 176) |
+            ((data[cur + 23] as u256) << 184) |
+            ((data[cur + 24] as u256) << 192) |
+            ((data[cur + 25] as u256) << 200) |
+            ((data[cur + 26] as u256) << 208) |
+            ((data[cur + 27] as u256) << 216) |
+            ((data[cur + 28] as u256) << 224) |
+            ((data[cur + 29] as u256) << 232) |
+            ((data[cur + 30] as u256) << 240) |
+            ((data[cur + 31] as u256) << 248);
+
+    stream.cur += 32;
+    res
+}
+
+ + + +
+ + + +## Function `deserialize_u256_entry` + +Deserializes a u256 value from the stream. + + +
public entry fun deserialize_u256_entry(data: vector<u8>, cursor: u64)
+
+ + + +
+Implementation + + +
public entry fun deserialize_u256_entry(data: vector<u8>, cursor: u64) {
+    let stream = BCSStream {
+        data,
+        cur: cursor,
+    };
+    deserialize_u256(&mut stream);
+}
+
+ + + +
+ + + +## Function `deserialize_vector` + +Deserializes an array of BCS deserializable elements from the stream. +First, reads the length of the vector, which is in uleb128 format. +After determining the length, it then reads the contents of the vector. +The elem_deserializer lambda expression is used sequentially to deserialize each element of the vector. + + +
public fun deserialize_vector<E>(stream: &mut bcs_stream::BCSStream, elem_deserializer: |&mut bcs_stream::BCSStream|E): vector<E>
+
+ + + +
+Implementation + + +
public inline fun deserialize_vector<E>(stream: &mut BCSStream, elem_deserializer: |&mut BCSStream| E): vector<E> {
+    let len = deserialize_uleb128(stream);
+    let v = vector::empty();
+
+    for (i in 0..len) {
+        v.push_back(elem_deserializer(stream));
+    };
+
+    v
+}
+
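A sketch of supplying the element deserializer (hypothetical snippet, assuming the input was produced by std::bcs from a vector<u64>):

// The lambda is invoked once per element, in order.
let stream = bcs_stream::new(bcs::to_bytes(&vector[1u64, 2, 3]));
let v = bcs_stream::deserialize_vector(&mut stream, |s| bcs_stream::deserialize_u64(s));
assert!(v == vector[1u64, 2, 3], 1);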
+ + + +
+ + + +## Function `deserialize_string` + +Deserializes utf-8 String from the stream. +First, reads the length of the String, which is in uleb128 format. +After determining the length, it then reads the contents of the String. + + +
public fun deserialize_string(stream: &mut bcs_stream::BCSStream): string::String
+
+ + + +
+Implementation + + +
public fun deserialize_string(stream: &mut BCSStream): String {
+    let len = deserialize_uleb128(stream);
+    let data = &stream.data;
+    let cur = stream.cur;
+
+    assert!(cur + len <= data.length(), error::out_of_range(EOUT_OF_BYTES));
+
+    let res = string::utf8(data.slice(cur, cur + len));
+    stream.cur = cur + len;
+
+    res
+}
+
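For example (hypothetical snippet, assuming std::string is in scope), the bytes 0x03 0x61 0x62 0x63 are the ULEB128 length 3 followed by the UTF-8 bytes of "abc":

let stream = bcs_stream::new(vector[0x03, 0x61, 0x62, 0x63]);
assert!(bcs_stream::deserialize_string(&mut stream) == string::utf8(b"abc"), 1);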
+ + + +
+ + + +## Function `deserialize_option` + +Deserializes Option from the stream. +First, reads a single byte representing the presence (0x01) or absence (0x00) of data. +After determining the presence of data, it then reads the actual data if present. +The elem_deserializer lambda expression is used to deserialize the element contained within the Option. + + +
public fun deserialize_option<E>(stream: &mut bcs_stream::BCSStream, elem_deserializer: |&mut bcs_stream::BCSStream|E): option::Option<E>
+
+ + + +
+Implementation + + +
public inline fun deserialize_option<E>(stream: &mut BCSStream, elem_deserializer: |&mut BCSStream| E): Option<E> {
+    let is_data = deserialize_bool(stream);
+    if (is_data) {
+        option::some(elem_deserializer(stream))
+    } else {
+        option::none()
+    }
+}
+
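A sketch of deserializing an Option<u64> (hypothetical snippet, assuming std::option and std::bcs are in scope):

// BCS encodes Some(7u64) as the presence byte 0x01 followed by the u64 payload.
let stream = bcs_stream::new(bcs::to_bytes(&option::some(7u64)));
let value = bcs_stream::deserialize_option(&mut stream, |s| bcs_stream::deserialize_u64(s));
assert!(value == option::some(7u64), 1);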
+ + + +
+ + + +## Specification + + + +
pragma verify = false;
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/aptos-stdlib/doc/big_vector.md b/aptos-move/framework/aptos-stdlib/doc/big_vector.md index cd3c8996c6564..f7000f4e40112 100644 --- a/aptos-move/framework/aptos-stdlib/doc/big_vector.md +++ b/aptos-move/framework/aptos-stdlib/doc/big_vector.md @@ -152,7 +152,7 @@ Create an empty vector. Implementation -
public(friend) fun empty<T: store>(bucket_size: u64): BigVector<T> {
+
friend fun empty<T: store>(bucket_size: u64): BigVector<T> {
     assert!(bucket_size > 0, error::invalid_argument(EZERO_BUCKET_SIZE));
     BigVector {
         buckets: table_with_length::new(),
@@ -182,9 +182,9 @@ Create a vector of length 1 containing the passed in element.
 Implementation
 
 
-
public(friend) fun singleton<T: store>(element: T, bucket_size: u64): BigVector<T> {
+
friend fun singleton<T: store>(element: T, bucket_size: u64): BigVector<T> {
     let v = empty(bucket_size);
-    push_back(&mut v, element);
+    v.push_back(element);
     v
 }
 
@@ -197,11 +197,11 @@ Create a vector of length 1 containing the passed in element. ## Function `destroy_empty` -Destroy the vector v. -Aborts if v is not empty. +Destroy the vector self. +Aborts if self is not empty. -
public fun destroy_empty<T>(v: big_vector::BigVector<T>)
+
public fun destroy_empty<T>(self: big_vector::BigVector<T>)
 
@@ -210,10 +210,10 @@ Aborts if v is not empty. Implementation -
public fun destroy_empty<T>(v: BigVector<T>) {
-    assert!(is_empty(&v), error::invalid_argument(EVECTOR_NOT_EMPTY));
-    let BigVector { buckets, end_index: _, bucket_size: _ } = v;
-    table_with_length::destroy_empty(buckets);
+
public fun destroy_empty<T>(self: BigVector<T>) {
+    assert!(self.is_empty(), error::invalid_argument(EVECTOR_NOT_EMPTY));
+    let BigVector { buckets, end_index: _, bucket_size: _ } = self;
+    buckets.destroy_empty();
 }
 
@@ -225,10 +225,10 @@ Aborts if v is not empty. ## Function `destroy` -Destroy the vector v if T has drop +Destroy the vector self if T has drop -
public fun destroy<T: drop>(v: big_vector::BigVector<T>)
+
public fun destroy<T: drop>(self: big_vector::BigVector<T>)
 
@@ -237,15 +237,15 @@ Destroy the vector v if T has drop Implementation -
public fun destroy<T: drop>(v: BigVector<T>) {
-    let BigVector { buckets, end_index, bucket_size: _ } = v;
+
public fun destroy<T: drop>(self: BigVector<T>) {
+    let BigVector { buckets, end_index, bucket_size: _ } = self;
     let i = 0;
     while (end_index > 0) {
-        let num_elements = vector::length(&table_with_length::remove(&mut buckets, i));
-        end_index = end_index - num_elements;
-        i = i + 1;
+        let num_elements = buckets.remove(i).length();
+        end_index -= num_elements;
+        i += 1;
     };
-    table_with_length::destroy_empty(buckets);
+    buckets.destroy_empty();
 }
 
@@ -257,11 +257,11 @@ Destroy the vector v if T has drop ## Function `borrow` -Acquire an immutable reference to the ith element of the vector v. +Acquire an immutable reference to the ith element of the vector self. Aborts if i is out of bounds. -
public fun borrow<T>(v: &big_vector::BigVector<T>, i: u64): &T
+
public fun borrow<T>(self: &big_vector::BigVector<T>, i: u64): &T
 
@@ -270,9 +270,9 @@ Aborts if i is out of bounds. Implementation -
public fun borrow<T>(v: &BigVector<T>, i: u64): &T {
-    assert!(i < length(v), error::invalid_argument(EINDEX_OUT_OF_BOUNDS));
-    vector::borrow(table_with_length::borrow(&v.buckets, i / v.bucket_size), i % v.bucket_size)
+
public fun borrow<T>(self: &BigVector<T>, i: u64): &T {
+    assert!(i < self.length(), error::invalid_argument(EINDEX_OUT_OF_BOUNDS));
+    self.buckets.borrow(i / self.bucket_size).borrow(i % self.bucket_size)
 }
 
@@ -284,11 +284,11 @@ Aborts if i is out of bounds. ## Function `borrow_mut` -Return a mutable reference to the ith element in the vector v. +Return a mutable reference to the ith element in the vector self. Aborts if i is out of bounds. -
public fun borrow_mut<T>(v: &mut big_vector::BigVector<T>, i: u64): &mut T
+
public fun borrow_mut<T>(self: &mut big_vector::BigVector<T>, i: u64): &mut T
 
@@ -297,9 +297,9 @@ Aborts if i is out of bounds. Implementation -
public fun borrow_mut<T>(v: &mut BigVector<T>, i: u64): &mut T {
-    assert!(i < length(v), error::invalid_argument(EINDEX_OUT_OF_BOUNDS));
-    vector::borrow_mut(table_with_length::borrow_mut(&mut v.buckets, i / v.bucket_size), i % v.bucket_size)
+
public fun borrow_mut<T>(self: &mut BigVector<T>, i: u64): &mut T {
+    assert!(i < self.length(), error::invalid_argument(EINDEX_OUT_OF_BOUNDS));
+    self.buckets.borrow_mut(i / self.bucket_size).borrow_mut(i % self.bucket_size)
 }
 
@@ -311,12 +311,12 @@ Aborts if i is out of bounds. ## Function `append` -Empty and destroy the other vector, and push each of the elements in the other vector onto the lhs vector in the +Empty and destroy the other vector, and push each of the elements in the other vector onto the self vector in the same order as they occurred in other. Disclaimer: This function is costly. Use it at your own discretion. -
public fun append<T: store>(lhs: &mut big_vector::BigVector<T>, other: big_vector::BigVector<T>)
+
public fun append<T: store>(self: &mut big_vector::BigVector<T>, other: big_vector::BigVector<T>)
 
@@ -325,19 +325,19 @@ Disclaimer: This function is costly. Use it at your own discretion. Implementation -
public fun append<T: store>(lhs: &mut BigVector<T>, other: BigVector<T>) {
-    let other_len = length(&other);
+
public fun append<T: store>(self: &mut BigVector<T>, other: BigVector<T>) {
+    let other_len = other.length();
     let half_other_len = other_len / 2;
     let i = 0;
     while (i < half_other_len) {
-        push_back(lhs, swap_remove(&mut other, i));
-        i = i + 1;
+        self.push_back(other.swap_remove(i));
+        i += 1;
     };
     while (i < other_len) {
-        push_back(lhs, pop_back(&mut other));
-        i = i + 1;
+        self.push_back(other.pop_back());
+        i += 1;
     };
-    destroy_empty(other);
+    other.destroy_empty();
 }
 
@@ -349,11 +349,11 @@ Disclaimer: This function is costly. Use it at your own discretion. ## Function `push_back` -Add element val to the end of the vector v. It grows the buckets when the current buckets are full. +Add element val to the end of the vector self. It grows the buckets when the current buckets are full. This operation will cost more gas when it adds new bucket. -
public fun push_back<T: store>(v: &mut big_vector::BigVector<T>, val: T)
+
public fun push_back<T: store>(self: &mut big_vector::BigVector<T>, val: T)
 
@@ -362,15 +362,15 @@ This operation will cost more gas when it adds new bucket. Implementation -
public fun push_back<T: store>(v: &mut BigVector<T>, val: T) {
-    let num_buckets = table_with_length::length(&v.buckets);
-    if (v.end_index == num_buckets * v.bucket_size) {
-        table_with_length::add(&mut v.buckets, num_buckets, vector::empty());
-        vector::push_back(table_with_length::borrow_mut(&mut v.buckets, num_buckets), val);
+
public fun push_back<T: store>(self: &mut BigVector<T>, val: T) {
+    let num_buckets = self.buckets.length();
+    if (self.end_index == num_buckets * self.bucket_size) {
+        self.buckets.add(num_buckets, vector::empty());
+        self.buckets.borrow_mut(num_buckets).push_back(val);
     } else {
-        vector::push_back(table_with_length::borrow_mut(&mut v.buckets, num_buckets - 1), val);
+        self.buckets.borrow_mut(num_buckets - 1).push_back(val);
     };
-    v.end_index = v.end_index + 1;
+    self.end_index += 1;
 }
 
@@ -382,12 +382,12 @@ This operation will cost more gas when it adds new bucket. ## Function `pop_back` -Pop an element from the end of vector v. It doesn't shrink the buckets even if they're empty. +Pop an element from the end of vector self. It doesn't shrink the buckets even if they're empty. Call shrink_to_fit explicitly to deallocate empty buckets. -Aborts if v is empty. +Aborts if self is empty. -
public fun pop_back<T>(v: &mut big_vector::BigVector<T>): T
+
public fun pop_back<T>(self: &mut big_vector::BigVector<T>): T
 
@@ -396,17 +396,17 @@ Aborts if v is empty. Implementation -
public fun pop_back<T>(v: &mut BigVector<T>): T {
-    assert!(!is_empty(v), error::invalid_state(EVECTOR_EMPTY));
-    let num_buckets = table_with_length::length(&v.buckets);
-    let last_bucket = table_with_length::borrow_mut(&mut v.buckets, num_buckets - 1);
-    let val = vector::pop_back(last_bucket);
+
public fun pop_back<T>(self: &mut BigVector<T>): T {
+    assert!(!self.is_empty(), error::invalid_state(EVECTOR_EMPTY));
+    let num_buckets = self.buckets.length();
+    let last_bucket = self.buckets.borrow_mut(num_buckets - 1);
+    let val = last_bucket.pop_back();
     // Shrink the table if the last vector is empty.
-    if (vector::is_empty(last_bucket)) {
+    if (last_bucket.is_empty()) {
         move last_bucket;
-        vector::destroy_empty(table_with_length::remove(&mut v.buckets, num_buckets - 1));
+        self.buckets.remove(num_buckets - 1).destroy_empty();
     };
-    v.end_index = v.end_index - 1;
+    self.end_index -= 1;
     val
 }
 
@@ -419,12 +419,12 @@ Aborts if v is empty. ## Function `remove` -Remove the element at index i in the vector v and return the owned value that was previously stored at i in v. +Remove the element at index i in the vector v and return the owned value that was previously stored at i in self. All elements occurring at indices greater than i will be shifted down by 1. Will abort if i is out of bounds. Disclaimer: This function is costly. Use it at your own discretion. -
public fun remove<T>(v: &mut big_vector::BigVector<T>, i: u64): T
+
public fun remove<T>(self: &mut big_vector::BigVector<T>, i: u64): T
 
@@ -433,40 +433,40 @@ Disclaimer: This function is costly. Use it at your own discretion. Implementation -
public fun remove<T>(v: &mut BigVector<T>, i: u64): T {
-    let len = length(v);
+
public fun remove<T>(self: &mut BigVector<T>, i: u64): T {
+    let len = self.length();
     assert!(i < len, error::invalid_argument(EINDEX_OUT_OF_BOUNDS));
-    let num_buckets = table_with_length::length(&v.buckets);
-    let cur_bucket_index = i / v.bucket_size + 1;
-    let cur_bucket = table_with_length::borrow_mut(&mut v.buckets, cur_bucket_index - 1);
-    let res = vector::remove(cur_bucket, i % v.bucket_size);
-    v.end_index = v.end_index - 1;
+    let num_buckets = self.buckets.length();
+    let cur_bucket_index = i / self.bucket_size + 1;
+    let cur_bucket = self.buckets.borrow_mut(cur_bucket_index - 1);
+    let res = cur_bucket.remove(i % self.bucket_size);
+    self.end_index -= 1;
     move cur_bucket;
     while ({
         spec {
             invariant cur_bucket_index <= num_buckets;
-            invariant table_with_length::spec_len(v.buckets) == num_buckets;
+            invariant table_with_length::spec_len(self.buckets) == num_buckets;
         };
         (cur_bucket_index < num_buckets)
     }) {
         // remove one element from the start of current vector
-        let cur_bucket = table_with_length::borrow_mut(&mut v.buckets, cur_bucket_index);
-        let t = vector::remove(cur_bucket, 0);
+        let cur_bucket = self.buckets.borrow_mut(cur_bucket_index);
+        let t = cur_bucket.remove(0);
         move cur_bucket;
         // and put it at the end of the last one
-        let prev_bucket = table_with_length::borrow_mut(&mut v.buckets, cur_bucket_index - 1);
-        vector::push_back(prev_bucket, t);
-        cur_bucket_index = cur_bucket_index + 1;
+        let prev_bucket = self.buckets.borrow_mut(cur_bucket_index - 1);
+        prev_bucket.push_back(t);
+        cur_bucket_index += 1;
     };
     spec {
         assert cur_bucket_index == num_buckets;
     };
 
     // Shrink the table if the last vector is empty.
-    let last_bucket = table_with_length::borrow_mut(&mut v.buckets, num_buckets - 1);
-    if (vector::is_empty(last_bucket)) {
+    let last_bucket = self.buckets.borrow_mut(num_buckets - 1);
+    if (last_bucket.is_empty()) {
         move last_bucket;
-        vector::destroy_empty(table_with_length::remove(&mut v.buckets, num_buckets - 1));
+        self.buckets.remove(num_buckets - 1).destroy_empty();
     };
 
     res
@@ -481,12 +481,12 @@ Disclaimer: This function is costly. Use it at your own discretion.
 
 ## Function `swap_remove`
 
-Swap the ith element of the vector v with the last element and then pop the vector.
+Swap the ith element of the vector self with the last element and then pop the vector.
 This is O(1), but does not preserve ordering of elements in the vector.
 Aborts if i is out of bounds.
 
 
-
public fun swap_remove<T>(v: &mut big_vector::BigVector<T>, i: u64): T
+
public fun swap_remove<T>(self: &mut big_vector::BigVector<T>, i: u64): T
 
@@ -495,20 +495,20 @@ Aborts if i is out of bounds. Implementation -
public fun swap_remove<T>(v: &mut BigVector<T>, i: u64): T {
-    assert!(i < length(v), error::invalid_argument(EINDEX_OUT_OF_BOUNDS));
-    let last_val = pop_back(v);
+
public fun swap_remove<T>(self: &mut BigVector<T>, i: u64): T {
+    assert!(i < self.length(), error::invalid_argument(EINDEX_OUT_OF_BOUNDS));
+    let last_val = self.pop_back();
     // if the requested value is the last one, return it
-    if (v.end_index == i) {
+    if (self.end_index == i) {
         return last_val
     };
-    // because the lack of mem::swap, here we swap remove the requested value from the bucket
+    // because the lack of mem::swap, here we swap remove the requested value from the bucket
     // and append the last_val to the bucket then swap the last bucket val back
-    let bucket = table_with_length::borrow_mut(&mut v.buckets, i / v.bucket_size);
-    let bucket_len = vector::length(bucket);
-    let val = vector::swap_remove(bucket, i % v.bucket_size);
-    vector::push_back(bucket, last_val);
-    vector::swap(bucket, i % v.bucket_size, bucket_len - 1);
+    let bucket = self.buckets.borrow_mut(i / self.bucket_size);
+    let bucket_len = bucket.length();
+    let val = bucket.swap_remove(i % self.bucket_size);
+    bucket.push_back(last_val);
+    bucket.swap(i % self.bucket_size, bucket_len - 1);
     val
 }
 
@@ -521,11 +521,11 @@ Aborts if i is out of bounds. ## Function `swap` -Swap the elements at the i'th and j'th indices in the vector v. Will abort if either of i or j are out of bounds -for v. +Swap the elements at the i'th and j'th indices in the vector self. Will abort if either of i or j are out of bounds +for self. -
public fun swap<T>(v: &mut big_vector::BigVector<T>, i: u64, j: u64)
+
public fun swap<T>(self: &mut big_vector::BigVector<T>, i: u64, j: u64)
 
@@ -534,33 +534,33 @@ for v. Implementation -
public fun swap<T>(v: &mut BigVector<T>, i: u64, j: u64) {
-    assert!(i < length(v) && j < length(v), error::invalid_argument(EINDEX_OUT_OF_BOUNDS));
-    let i_bucket_index = i / v.bucket_size;
-    let j_bucket_index = j / v.bucket_size;
-    let i_vector_index = i % v.bucket_size;
-    let j_vector_index = j % v.bucket_size;
+
public fun swap<T>(self: &mut BigVector<T>, i: u64, j: u64) {
+    assert!(i < self.length() && j < self.length(), error::invalid_argument(EINDEX_OUT_OF_BOUNDS));
+    let i_bucket_index = i / self.bucket_size;
+    let j_bucket_index = j / self.bucket_size;
+    let i_vector_index = i % self.bucket_size;
+    let j_vector_index = j % self.bucket_size;
     if (i_bucket_index == j_bucket_index) {
-        vector::swap(table_with_length::borrow_mut(&mut v.buckets, i_bucket_index), i_vector_index, j_vector_index);
+        self.buckets.borrow_mut(i_bucket_index).swap(i_vector_index, j_vector_index);
         return
     };
     // If i and j are in different buckets, take the buckets out first for easy mutation.
-    let bucket_i = table_with_length::remove(&mut v.buckets, i_bucket_index);
-    let bucket_j = table_with_length::remove(&mut v.buckets, j_bucket_index);
+    let bucket_i = self.buckets.remove(i_bucket_index);
+    let bucket_j = self.buckets.remove(j_bucket_index);
     // Get the elements from buckets by calling `swap_remove`.
-    let element_i = vector::swap_remove(&mut bucket_i, i_vector_index);
-    let element_j = vector::swap_remove(&mut bucket_j, j_vector_index);
+    let element_i = bucket_i.swap_remove(i_vector_index);
+    let element_j = bucket_j.swap_remove(j_vector_index);
     // Swap the elements and push back to the other bucket.
-    vector::push_back(&mut bucket_i, element_j);
-    vector::push_back(&mut bucket_j, element_i);
-    let last_index_in_bucket_i = vector::length(&bucket_i) - 1;
-    let last_index_in_bucket_j = vector::length(&bucket_j) - 1;
+    bucket_i.push_back(element_j);
+    bucket_j.push_back(element_i);
+    let last_index_in_bucket_i = bucket_i.length() - 1;
+    let last_index_in_bucket_j = bucket_j.length() - 1;
     // Re-position the swapped elements to the right index.
-    vector::swap(&mut bucket_i, i_vector_index, last_index_in_bucket_i);
-    vector::swap(&mut bucket_j, j_vector_index, last_index_in_bucket_j);
+    bucket_i.swap(i_vector_index, last_index_in_bucket_i);
+    bucket_j.swap(j_vector_index, last_index_in_bucket_j);
     // Add back the buckets.
-    table_with_length::add(&mut v.buckets, i_bucket_index, bucket_i);
-    table_with_length::add(&mut v.buckets, j_bucket_index, bucket_j);
+    self.buckets.add(i_bucket_index, bucket_i);
+    self.buckets.add(j_bucket_index, bucket_j);
 }
 
@@ -572,11 +572,11 @@ for v. ## Function `reverse` -Reverse the order of the elements in the vector v in-place. +Reverse the order of the elements in the vector self in-place. Disclaimer: This function is costly. Use it at your own discretion. -
public fun reverse<T>(v: &mut big_vector::BigVector<T>)
+
public fun reverse<T>(self: &mut big_vector::BigVector<T>)
 
@@ -585,38 +585,36 @@ Disclaimer: This function is costly. Use it at your own discretion. Implementation -
public fun reverse<T>(v: &mut BigVector<T>) {
+
public fun reverse<T>(self: &mut BigVector<T>) {
     let new_buckets = vector[];
     let push_bucket = vector[];
-    let num_buckets = table_with_length::length(&v.buckets);
+    let num_buckets = self.buckets.length();
     let num_buckets_left = num_buckets;
 
     while (num_buckets_left > 0) {
-        let pop_bucket = table_with_length::remove(&mut v.buckets, num_buckets_left - 1);
-        vector::for_each_reverse(pop_bucket, |val| {
-            vector::push_back(&mut push_bucket, val);
-            if (vector::length(&push_bucket) == v.bucket_size) {
-                vector::push_back(&mut new_buckets, push_bucket);
+        let pop_bucket = self.buckets.remove(num_buckets_left - 1);
+        pop_bucket.for_each_reverse(|val| {
+            push_bucket.push_back(val);
+            if (push_bucket.length() == self.bucket_size) {
+                new_buckets.push_back(push_bucket);
                 push_bucket = vector[];
             };
         });
-        num_buckets_left = num_buckets_left - 1;
+        num_buckets_left -= 1;
     };
 
-    if (vector::length(&push_bucket) > 0) {
-        vector::push_back(&mut new_buckets, push_bucket);
+    if (push_bucket.length() > 0) {
+        new_buckets.push_back(push_bucket);
     } else {
-        vector::destroy_empty(push_bucket);
+        push_bucket.destroy_empty();
     };
 
-    vector::reverse(&mut new_buckets);
-    let i = 0;
-    assert!(table_with_length::length(&v.buckets) == 0, 0);
-    while (i < num_buckets) {
-        table_with_length::add(&mut v.buckets, i, vector::pop_back(&mut new_buckets));
-        i = i + 1;
+    new_buckets.reverse();
+    assert!(self.buckets.length() == 0, 0);
+    for (i in 0..num_buckets) {
+        self.buckets.add(i, new_buckets.pop_back());
     };
-    vector::destroy_empty(new_buckets);
+    new_buckets.destroy_empty();
 }
 
@@ -628,12 +626,12 @@ Disclaimer: This function is costly. Use it at your own discretion.

 ## Function `index_of`

-Return the index of the first occurrence of an element in v that is equal to e. Returns (true, index) if such an
+Return the index of the first occurrence of an element in self that is equal to e. Returns (true, index) if such an
 element was found, and (false, 0) otherwise.
 Disclaimer: This function is costly. Use it at your own discretion.

-
public fun index_of<T>(v: &big_vector::BigVector<T>, val: &T): (bool, u64)
+
public fun index_of<T>(self: &big_vector::BigVector<T>, val: &T): (bool, u64)
 
@@ -642,16 +640,16 @@ Disclaimer: This function is costly. Use it at your own discretion. Implementation -
public fun index_of<T>(v: &BigVector<T>, val: &T): (bool, u64) {
-    let num_buckets = table_with_length::length(&v.buckets);
+
public fun index_of<T>(self: &BigVector<T>, val: &T): (bool, u64) {
+    let num_buckets = self.buckets.length();
     let bucket_index = 0;
     while (bucket_index < num_buckets) {
-        let cur = table_with_length::borrow(&v.buckets, bucket_index);
-        let (found, i) = vector::index_of(cur, val);
+        let cur = self.buckets.borrow(bucket_index);
+        let (found, i) = cur.index_of(val);
         if (found) {
-            return (true, bucket_index * v.bucket_size + i)
+            return (true, bucket_index * self.bucket_size + i)
         };
-        bucket_index = bucket_index + 1;
+        bucket_index += 1;
     };
     (false, 0)
 }
@@ -665,11 +663,11 @@ Disclaimer: This function is costly. Use it at your own discretion.
 
 ## Function `contains`
 
-Return if an element equal to e exists in the vector v.
+Return whether an element equal to val exists in the vector self.
 Disclaimer: This function is costly. Use it at your own discretion.
 
 
-
public fun contains<T>(v: &big_vector::BigVector<T>, val: &T): bool
+
public fun contains<T>(self: &big_vector::BigVector<T>, val: &T): bool
 
@@ -678,9 +676,9 @@ Disclaimer: This function is costly. Use it at your own discretion. Implementation -
public fun contains<T>(v: &BigVector<T>, val: &T): bool {
-    if (is_empty(v)) return false;
-    let (exist, _) = index_of(v, val);
+
public fun contains<T>(self: &BigVector<T>, val: &T): bool {
+    if (self.is_empty()) return false;
+    let (exist, _) = self.index_of(val);
     exist
 }
 
@@ -698,7 +696,7 @@ atomic view of the whole vector. Disclaimer: This function may be costly as the big vector may be huge in size. Use it at your own discretion. -
public fun to_vector<T: copy>(v: &big_vector::BigVector<T>): vector<T>
+
public fun to_vector<T: copy>(self: &big_vector::BigVector<T>): vector<T>
 
@@ -707,13 +705,11 @@ Disclaimer: This function may be costly as the big vector may be huge in size. U Implementation -
public fun to_vector<T: copy>(v: &BigVector<T>): vector<T> {
+
public fun to_vector<T: copy>(self: &BigVector<T>): vector<T> {
     let res = vector[];
-    let num_buckets = table_with_length::length(&v.buckets);
-    let i = 0;
-    while (i < num_buckets) {
-        vector::append(&mut res, *table_with_length::borrow(&v.buckets, i));
-        i = i + 1;
+    let num_buckets = self.buckets.length();
+    for (i in 0..num_buckets) {
+        res.append(*self.buckets.borrow(i));
     };
     res
 }
@@ -730,7 +726,7 @@ Disclaimer: This function may be costly as the big vector may be huge in size. U
 Return the length of the vector.
 
 
-
public fun length<T>(v: &big_vector::BigVector<T>): u64
+
public fun length<T>(self: &big_vector::BigVector<T>): u64
 
@@ -739,8 +735,8 @@ Return the length of the vector. Implementation -
public fun length<T>(v: &BigVector<T>): u64 {
-    v.end_index
+
public fun length<T>(self: &BigVector<T>): u64 {
+    self.end_index
 }
 
@@ -755,7 +751,7 @@ Return the length of the vector. Return true if the vector v has no elements and false otherwise. -
public fun is_empty<T>(v: &big_vector::BigVector<T>): bool
+
public fun is_empty<T>(self: &big_vector::BigVector<T>): bool
 
@@ -764,8 +760,8 @@ Return true if the vector v has no elements and Implementation -
public fun is_empty<T>(v: &BigVector<T>): bool {
-    length(v) == 0
+
public fun is_empty<T>(self: &BigVector<T>): bool {
+    self.length() == 0
 }
 
@@ -847,7 +843,7 @@ Return true if the vector v has no elements and
aborts_if bucket_size == 0;
-ensures length(result) == 0;
+ensures result.length() == 0;
 ensures result.bucket_size == bucket_size;
 
@@ -865,7 +861,7 @@ Return true if the vector v has no elements and
aborts_if bucket_size == 0;
-ensures length(result) == 1;
+ensures result.length() == 1;
 ensures result.bucket_size == bucket_size;
 
@@ -876,13 +872,13 @@ Return true if the vector v has no elements and ### Function `destroy_empty` -
public fun destroy_empty<T>(v: big_vector::BigVector<T>)
+
public fun destroy_empty<T>(self: big_vector::BigVector<T>)
 
-
aborts_if !is_empty(v);
+
aborts_if !self.is_empty();
 
@@ -892,14 +888,14 @@ Return true if the vector v has no elements and ### Function `borrow` -
public fun borrow<T>(v: &big_vector::BigVector<T>, i: u64): &T
+
public fun borrow<T>(self: &big_vector::BigVector<T>, i: u64): &T
 
-
aborts_if i >= length(v);
-ensures result == spec_at(v, i);
+
aborts_if i >= self.length();
+ensures result == spec_at(self, i);
 
@@ -909,14 +905,14 @@ Return true if the vector v has no elements and ### Function `borrow_mut` -
public fun borrow_mut<T>(v: &mut big_vector::BigVector<T>, i: u64): &mut T
+
public fun borrow_mut<T>(self: &mut big_vector::BigVector<T>, i: u64): &mut T
 
-
aborts_if i >= length(v);
-ensures result == spec_at(v, i);
+
aborts_if i >= self.length();
+ensures result == spec_at(self, i);
 
@@ -926,7 +922,7 @@ Return true if the vector v has no elements and ### Function `append` -
public fun append<T: store>(lhs: &mut big_vector::BigVector<T>, other: big_vector::BigVector<T>)
+
public fun append<T: store>(self: &mut big_vector::BigVector<T>, other: big_vector::BigVector<T>)
 
@@ -942,19 +938,19 @@ Return true if the vector v has no elements and ### Function `push_back` -
public fun push_back<T: store>(v: &mut big_vector::BigVector<T>, val: T)
+
public fun push_back<T: store>(self: &mut big_vector::BigVector<T>, val: T)
 
-
let num_buckets = spec_table_len(v.buckets);
+
let num_buckets = spec_table_len(self.buckets);
 include PushbackAbortsIf<T>;
-ensures length(v) == length(old(v)) + 1;
-ensures v.end_index == old(v.end_index) + 1;
-ensures spec_at(v, v.end_index-1) == val;
-ensures forall i in 0..v.end_index-1: spec_at(v, i) == spec_at(old(v), i);
-ensures v.bucket_size == old(v).bucket_size;
+ensures self.length() == old(self).length() + 1;
+ensures self.end_index == old(self.end_index) + 1;
+ensures spec_at(self, self.end_index-1) == val;
+ensures forall i in 0..self.end_index-1: spec_at(self, i) == spec_at(old(self), i);
+ensures self.bucket_size == old(self).bucket_size;
 
@@ -964,10 +960,10 @@ Return true if the vector v has no elements and
schema PushbackAbortsIf<T> {
-    v: BigVector<T>;
-    let num_buckets = spec_table_len(v.buckets);
-    aborts_if num_buckets * v.bucket_size > MAX_U64;
-    aborts_if v.end_index + 1 > MAX_U64;
+    self: BigVector<T>;
+    let num_buckets = spec_table_len(self.buckets);
+    aborts_if num_buckets * self.bucket_size > MAX_U64;
+    aborts_if self.end_index + 1 > MAX_U64;
 }
 
@@ -978,16 +974,16 @@ Return true if the vector v has no elements and ### Function `pop_back` -
public fun pop_back<T>(v: &mut big_vector::BigVector<T>): T
+
public fun pop_back<T>(self: &mut big_vector::BigVector<T>): T
 
-
aborts_if is_empty(v);
-ensures length(v) == length(old(v)) - 1;
-ensures result == old(spec_at(v, v.end_index-1));
-ensures forall i in 0..v.end_index: spec_at(v, i) == spec_at(old(v), i);
+
aborts_if self.is_empty();
+ensures self.length() == old(self).length() - 1;
+ensures result == old(spec_at(self, self.end_index-1));
+ensures forall i in 0..self.end_index: spec_at(self, i) == spec_at(old(self), i);
 
@@ -997,7 +993,7 @@ Return true if the vector v has no elements and ### Function `remove` -
public fun remove<T>(v: &mut big_vector::BigVector<T>, i: u64): T
+
public fun remove<T>(self: &mut big_vector::BigVector<T>, i: u64): T
 
@@ -1013,16 +1009,16 @@ Return true if the vector v has no elements and ### Function `swap_remove` -
public fun swap_remove<T>(v: &mut big_vector::BigVector<T>, i: u64): T
+
public fun swap_remove<T>(self: &mut big_vector::BigVector<T>, i: u64): T
 
pragma verify_duration_estimate = 120;
-aborts_if i >= length(v);
-ensures length(v) == length(old(v)) - 1;
-ensures result == spec_at(old(v), i);
+aborts_if i >= self.length();
+ensures self.length() == old(self).length() - 1;
+ensures result == spec_at(old(self), i);
 
@@ -1032,20 +1028,20 @@ Return true if the vector v has no elements and ### Function `swap` -
public fun swap<T>(v: &mut big_vector::BigVector<T>, i: u64, j: u64)
+
public fun swap<T>(self: &mut big_vector::BigVector<T>, i: u64, j: u64)
 
pragma verify_duration_estimate = 1000;
-aborts_if i >= length(v) || j >= length(v);
-ensures length(v) == length(old(v));
-ensures spec_at(v, i) == spec_at(old(v), j);
-ensures spec_at(v, j) == spec_at(old(v), i);
-ensures forall idx in 0..length(v)
+aborts_if i >= self.length() || j >= self.length();
+ensures self.length() == old(self).length();
+ensures spec_at(self, i) == spec_at(old(self), j);
+ensures spec_at(self, j) == spec_at(old(self), i);
+ensures forall idx in 0..self.length()
     where idx != i && idx != j:
-    spec_at(v, idx) == spec_at(old(v), idx);
+    spec_at(self, idx) == spec_at(old(self), idx);
 
@@ -1055,7 +1051,7 @@ Return true if the vector v has no elements and ### Function `reverse` -
public fun reverse<T>(v: &mut big_vector::BigVector<T>)
+
public fun reverse<T>(self: &mut big_vector::BigVector<T>)
 
@@ -1071,7 +1067,7 @@ Return true if the vector v has no elements and ### Function `index_of` -
public fun index_of<T>(v: &big_vector::BigVector<T>, val: &T): (bool, u64)
+
public fun index_of<T>(self: &big_vector::BigVector<T>, val: &T): (bool, u64)
 
diff --git a/aptos-move/framework/aptos-stdlib/doc/bls12381.md b/aptos-move/framework/aptos-stdlib/doc/bls12381.md
index da241d99c0969..b22d0efcfba91 100644
--- a/aptos-move/framework/aptos-stdlib/doc/bls12381.md
+++ b/aptos-move/framework/aptos-stdlib/doc/bls12381.md
@@ -747,7 +747,7 @@ Deserializes an aggregate-or-multi-signature from 96 bytes.
public fun aggr_or_multi_signature_from_bytes(bytes: vector<u8>): AggrOrMultiSignature {
-    assert!(std::vector::length(&bytes) == SIGNATURE_SIZE, std::error::invalid_argument(EWRONG_SIZE));
+    assert!(bytes.length() == SIGNATURE_SIZE, std::error::invalid_argument(EWRONG_SIZE));
 
     AggrOrMultiSignature {
         bytes
diff --git a/aptos-move/framework/aptos-stdlib/doc/capability.md b/aptos-move/framework/aptos-stdlib/doc/capability.md
index 5daf02b7ad916..cc89c3347e4c2 100644
--- a/aptos-move/framework/aptos-stdlib/doc/capability.md
+++ b/aptos-move/framework/aptos-stdlib/doc/capability.md
@@ -378,7 +378,7 @@ Helper to validate an acquire. Returns the root address of the capability.
         let root_addr = borrow_global<CapDelegateState<Feature>>(addr).root;
         // double check that requester is actually registered as a delegate
         assert!(exists<CapState<Feature>>(root_addr), error::invalid_state(EDELEGATE));
-        assert!(vector::contains(&borrow_global<CapState<Feature>>(root_addr).delegates, &addr),
+        assert!(borrow_global<CapState<Feature>>(root_addr).delegates.contains(&addr),
             error::invalid_state(EDELEGATE));
         root_addr
     } else {
@@ -400,7 +400,7 @@ Returns the root address associated with the given capability token. Only the ow
 of the feature can do this.
 
 
-
public fun root_addr<Feature>(cap: capability::Cap<Feature>, _feature_witness: &Feature): address
+
public fun root_addr<Feature>(self: capability::Cap<Feature>, _feature_witness: &Feature): address
 
@@ -409,8 +409,8 @@ of the feature can do this. Implementation -
public fun root_addr<Feature>(cap: Cap<Feature>, _feature_witness: &Feature): address {
-    cap.root
+
public fun root_addr<Feature>(self: Cap<Feature>, _feature_witness: &Feature): address {
+    self.root
 }
 
@@ -425,7 +425,7 @@ of the feature can do this. Returns the root address associated with the given linear capability token. -
public fun linear_root_addr<Feature>(cap: capability::LinearCap<Feature>, _feature_witness: &Feature): address
+
public fun linear_root_addr<Feature>(self: capability::LinearCap<Feature>, _feature_witness: &Feature): address
 
@@ -434,8 +434,8 @@ Returns the root address associated with the given linear capability token. Implementation -
public fun linear_root_addr<Feature>(cap: LinearCap<Feature>, _feature_witness: &Feature): address {
-    cap.root
+
public fun linear_root_addr<Feature>(self: LinearCap<Feature>, _feature_witness: &Feature): address {
+    self.root
 }
 
@@ -451,7 +451,7 @@ Registers a delegation relation. If the relation already exists, this function d nothing. -
public fun delegate<Feature>(cap: capability::Cap<Feature>, _feature_witness: &Feature, to: &signer)
+
public fun delegate<Feature>(self: capability::Cap<Feature>, _feature_witness: &Feature, to: &signer)
 
@@ -460,12 +460,12 @@ nothing. Implementation -
public fun delegate<Feature>(cap: Cap<Feature>, _feature_witness: &Feature, to: &signer)
+
public fun delegate<Feature>(self: Cap<Feature>, _feature_witness: &Feature, to: &signer)
 acquires CapState {
     let addr = signer::address_of(to);
     if (exists<CapDelegateState<Feature>>(addr)) return;
-    move_to(to, CapDelegateState<Feature> { root: cap.root });
-    add_element(&mut borrow_global_mut<CapState<Feature>>(cap.root).delegates, addr);
+    move_to(to, CapDelegateState<Feature> { root: self.root });
+    add_element(&mut borrow_global_mut<CapState<Feature>>(self.root).delegates, addr);
 }
 
@@ -480,7 +480,7 @@ nothing. Revokes a delegation relation. If no relation exists, this function does nothing. -
public fun revoke<Feature>(cap: capability::Cap<Feature>, _feature_witness: &Feature, from: address)
+
public fun revoke<Feature>(self: capability::Cap<Feature>, _feature_witness: &Feature, from: address)
 
@@ -489,12 +489,12 @@ Revokes a delegation relation. If no relation exists, this function does nothing Implementation -
public fun revoke<Feature>(cap: Cap<Feature>, _feature_witness: &Feature, from: address)
+
public fun revoke<Feature>(self: Cap<Feature>, _feature_witness: &Feature, from: address)
 acquires CapState, CapDelegateState
 {
     if (!exists<CapDelegateState<Feature>>(from)) return;
     let CapDelegateState { root: _root } = move_from<CapDelegateState<Feature>>(from);
-    remove_element(&mut borrow_global_mut<CapState<Feature>>(cap.root).delegates, &from);
+    remove_element(&mut borrow_global_mut<CapState<Feature>>(self.root).delegates, &from);
 }
 
@@ -519,9 +519,9 @@ Helper to remove an element from a vector.
fun remove_element<E: drop>(v: &mut vector<E>, x: &E) {
-    let (found, index) = vector::index_of(v, x);
+    let (found, index) = v.index_of(x);
     if (found) {
-        vector::remove(v, index);
+        v.remove(index);
     }
 }
 
@@ -547,8 +547,8 @@ Helper to add an element to a vector.
fun add_element<E: drop>(v: &mut vector<E>, x: E) {
-    if (!vector::contains(v, &x)) {
-        vector::push_back(v, x)
+    if (!v.contains(&x)) {
+        v.push_back(x)
     }
 }
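
The two helpers above give delegates set-like semantics: add_element skips duplicates and remove_element deletes by value. A test-only sketch of the same pattern on a plain vector, using the receiver-style calls this change introduces (the helpers themselves are private to the module):

```move
#[test]
fun delegates_set_semantics_sketch() {
    let v = vector[@0x1, @0x2];
    let existing = @0x2;
    let fresh = @0x3;
    let stale = @0x1;
    if (!v.contains(&existing)) { v.push_back(existing) };  // no-op: already present
    if (!v.contains(&fresh)) { v.push_back(fresh) };        // appended
    let (found, index) = v.index_of(&stale);
    if (found) {
        let _ = v.remove(index);                            // removed by value
    };
    assert!(!v.contains(&stale) && v.contains(&fresh), 0);
}
```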
 
@@ -676,7 +676,7 @@ Helper specification function to check whether a delegated capability exists at ### Function `delegate` -
public fun delegate<Feature>(cap: capability::Cap<Feature>, _feature_witness: &Feature, to: &signer)
+
public fun delegate<Feature>(self: capability::Cap<Feature>, _feature_witness: &Feature, to: &signer)
 
@@ -684,8 +684,8 @@ Helper specification function to check whether a delegated capability exists at
let addr = signer::address_of(to);
 ensures spec_has_delegate_cap<Feature>(addr);
-ensures !old(spec_has_delegate_cap<Feature>(addr)) ==> global<CapDelegateState<Feature>>(addr).root == cap.root;
-ensures !old(spec_has_delegate_cap<Feature>(addr)) ==> vector::spec_contains(spec_delegates<Feature>(cap.root), addr);
+ensures !old(spec_has_delegate_cap<Feature>(addr)) ==> global<CapDelegateState<Feature>>(addr).root == self.root;
+ensures !old(spec_has_delegate_cap<Feature>(addr)) ==> vector::spec_contains(spec_delegates<Feature>(self.root), addr);
 
@@ -695,7 +695,7 @@ Helper specification function to check whether a delegated capability exists at ### Function `revoke` -
public fun revoke<Feature>(cap: capability::Cap<Feature>, _feature_witness: &Feature, from: address)
+
public fun revoke<Feature>(self: capability::Cap<Feature>, _feature_witness: &Feature, from: address)
 
diff --git a/aptos-move/framework/aptos-stdlib/doc/comparator.md b/aptos-move/framework/aptos-stdlib/doc/comparator.md
index 1949f5812c5aa..babb592788b4b 100644
--- a/aptos-move/framework/aptos-stdlib/doc/comparator.md
+++ b/aptos-move/framework/aptos-stdlib/doc/comparator.md
@@ -92,7 +92,7 @@ Provides a framework for comparing two elements

-
public fun is_equal(result: &comparator::Result): bool
+
public fun is_equal(self: &comparator::Result): bool
 
@@ -101,8 +101,8 @@ Provides a framework for comparing two elements Implementation -
public fun is_equal(result: &Result): bool {
-    result.inner == EQUAL
+
public fun is_equal(self: &Result): bool {
+    self.inner == EQUAL
 }
 
@@ -116,7 +116,7 @@ Provides a framework for comparing two elements -
public fun is_smaller_than(result: &comparator::Result): bool
+
public fun is_smaller_than(self: &comparator::Result): bool
 
@@ -125,8 +125,8 @@ Provides a framework for comparing two elements Implementation -
public fun is_smaller_than(result: &Result): bool {
-    result.inner == SMALLER
+
public fun is_smaller_than(self: &Result): bool {
+    self.inner == SMALLER
 }
 
@@ -140,7 +140,7 @@ Provides a framework for comparing two elements -
public fun is_greater_than(result: &comparator::Result): bool
+
public fun is_greater_than(self: &comparator::Result): bool
 
@@ -149,8 +149,8 @@ Provides a framework for comparing two elements Implementation -
public fun is_greater_than(result: &Result): bool {
-    result.inner == GREATER
+
public fun is_greater_than(self: &Result): bool {
+    self.inner == GREATER
 }
 
@@ -201,21 +201,21 @@ Provides a framework for comparing two elements
public fun compare_u8_vector(left: vector<u8>, right: vector<u8>): Result {
-    let left_length = vector::length(&left);
-    let right_length = vector::length(&right);
+    let left_length = left.length();
+    let right_length = right.length();
 
     let idx = 0;
 
     while (idx < left_length && idx < right_length) {
-        let left_byte = *vector::borrow(&left, idx);
-        let right_byte = *vector::borrow(&right, idx);
+        let left_byte = left[idx];
+        let right_byte = right[idx];
 
         if (left_byte < right_byte) {
             return Result { inner: SMALLER }
         } else if (left_byte > right_byte) {
             return Result { inner: GREATER }
         };
-        idx = idx + 1;
+        idx += 1;
     };
 
     if (left_length < right_length) {
@@ -268,14 +268,14 @@ Provides a framework for comparing two elements
 ### Function `is_equal`
 
 
-
public fun is_equal(result: &comparator::Result): bool
+
public fun is_equal(self: &comparator::Result): bool
 
aborts_if false;
-let res = result;
+let res = self;
 ensures result == (res.inner == EQUAL);
 
@@ -286,14 +286,14 @@ Provides a framework for comparing two elements ### Function `is_smaller_than` -
public fun is_smaller_than(result: &comparator::Result): bool
+
public fun is_smaller_than(self: &comparator::Result): bool
 
aborts_if false;
-let res = result;
+let res = self;
 ensures result == (res.inner == SMALLER);
 
@@ -304,14 +304,14 @@ Provides a framework for comparing two elements ### Function `is_greater_than` -
public fun is_greater_than(result: &comparator::Result): bool
+
public fun is_greater_than(self: &comparator::Result): bool
 
aborts_if false;
-let res = result;
+let res = self;
 ensures result == (res.inner == GREATER);
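
For orientation, a test-only usage sketch of the comparator API with the receiver-style calls enabled by the self rename (assuming use aptos_std::comparator):

```move
#[test]
fun comparator_usage_sketch() {
    // compare_u8_vector orders byte vectors lexicographically.
    let r = comparator::compare_u8_vector(b"abc", b"abd");
    assert!(r.is_smaller_than(), 0);
    let r = comparator::compare_u8_vector(b"abc", b"abc");
    assert!(r.is_equal(), 1);
    let r = comparator::compare_u8_vector(b"abd", b"abc");
    assert!(r.is_greater_than(), 2);
}
```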
 
diff --git a/aptos-move/framework/aptos-stdlib/doc/copyable_any.md b/aptos-move/framework/aptos-stdlib/doc/copyable_any.md
index 7ac120437e652..ab34a75e7e8ed 100644
--- a/aptos-move/framework/aptos-stdlib/doc/copyable_any.md
+++ b/aptos-move/framework/aptos-stdlib/doc/copyable_any.md
@@ -110,7 +110,7 @@ also required from T.
 Unpack a value from the Any representation. This aborts if the value does not have the expected type T.

-
public fun unpack<T>(x: copyable_any::Any): T
+
public fun unpack<T>(self: copyable_any::Any): T
 
@@ -119,9 +119,9 @@ Unpack a value from the Any Implementation -
public fun unpack<T>(x: Any): T {
-    assert!(type_info::type_name<T>() == x.type_name, error::invalid_argument(ETYPE_MISMATCH));
-    from_bytes<T>(x.data)
+
public fun unpack<T>(self: Any): T {
+    assert!(type_info::type_name<T>() == self.type_name, error::invalid_argument(ETYPE_MISMATCH));
+    from_bytes<T>(self.data)
 }
 
@@ -136,7 +136,7 @@ Unpack a value from the Any Returns the type name of this Any -
public fun type_name(x: &copyable_any::Any): &string::String
+
public fun type_name(self: &copyable_any::Any): &string::String
 
@@ -145,8 +145,8 @@ Returns the type name of this Any Implementation -
public fun type_name(x: &Any): &String {
-    &x.type_name
+
public fun type_name(self: &Any): &String {
+    &self.type_name
 }
 
@@ -186,14 +186,14 @@ Returns the type name of this Any ### Function `unpack` -
public fun unpack<T>(x: copyable_any::Any): T
+
public fun unpack<T>(self: copyable_any::Any): T
 
include UnpackAbortsIf<T>;
-ensures result == from_bcs::deserialize<T>(x.data);
+ensures result == from_bcs::deserialize<T>(self.data);
 
@@ -203,9 +203,9 @@ Returns the type name of this Any
schema UnpackAbortsIf<T> {
-    x: Any;
-    aborts_if type_info::type_name<T>() != x.type_name;
-    aborts_if !from_bcs::deserializable<T>(x.data);
+    self: Any;
+    aborts_if type_info::type_name<T>() != self.type_name;
+    aborts_if !from_bcs::deserializable<T>(self.data);
 }
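
To make the type check in unpack and the UnpackAbortsIf schema concrete, a small round-trip sketch (assuming copyable_any::pack as the dual of unpack, as in aptos_std):

```move
#[test]
fun any_roundtrip_sketch() {
    // assumes `use aptos_std::copyable_any;`
    let any = copyable_any::pack(42u64);
    // Unpacking with the packed type succeeds; unpacking as any other type
    // would abort with ETYPE_MISMATCH.
    assert!(copyable_any::unpack<u64>(any) == 42, 0);
}
```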
 
@@ -216,14 +216,14 @@ Returns the type name of this Any ### Function `type_name` -
public fun type_name(x: &copyable_any::Any): &string::String
+
public fun type_name(self: &copyable_any::Any): &string::String
 
aborts_if false;
-ensures result == x.type_name;
+ensures result == self.type_name;
 
diff --git a/aptos-move/framework/aptos-stdlib/doc/crypto_algebra.md b/aptos-move/framework/aptos-stdlib/doc/crypto_algebra.md
index 58953506e7455..8bc3c6983ef16 100644
--- a/aptos-move/framework/aptos-stdlib/doc/crypto_algebra.md
+++ b/aptos-move/framework/aptos-stdlib/doc/crypto_algebra.md
@@ -874,7 +874,7 @@ NOTE: some hashing methods do not accept a dst and will abort if a
fun handles_from_elements<S>(elements: &vector<Element<S>>): vector<u64> {
-    let num_elements = std::vector::length(elements);
+    let num_elements = elements.length();
     let element_handles = std::vector::empty();
     let i = 0;
     while ({
@@ -884,8 +884,8 @@ NOTE: some hashing methods do not accept a dst and will abort if a
         };
         i < num_elements
     }) {
-        std::vector::push_back(&mut element_handles, std::vector::borrow(elements, i).handle);
-        i = i + 1;
+        element_handles.push_back(elements[i].handle);
+        i += 1;
     };
     element_handles
 }
diff --git a/aptos-move/framework/aptos-stdlib/doc/ed25519.md b/aptos-move/framework/aptos-stdlib/doc/ed25519.md
index ed3bb72bd34fe..05baf43129fae 100644
--- a/aptos-move/framework/aptos-stdlib/doc/ed25519.md
+++ b/aptos-move/framework/aptos-stdlib/doc/ed25519.md
@@ -243,7 +243,7 @@ Parses the input 32 bytes as an *unvalidated* Ed25519 public key.
 
 
 
public fun new_unvalidated_public_key_from_bytes(bytes: vector<u8>): UnvalidatedPublicKey {
-    assert!(std::vector::length(&bytes) == PUBLIC_KEY_NUM_BYTES, std::error::invalid_argument(E_WRONG_PUBKEY_SIZE));
+    assert!(bytes.length() == PUBLIC_KEY_NUM_BYTES, std::error::invalid_argument(E_WRONG_PUBKEY_SIZE));
     UnvalidatedPublicKey { bytes }
 }
 
@@ -300,7 +300,7 @@ Parses the input 64 bytes as a purported Ed25519 signature.
public fun new_signature_from_bytes(bytes: vector<u8>): Signature {
-    assert!(std::vector::length(&bytes) == SIGNATURE_NUM_BYTES, std::error::invalid_argument(E_WRONG_SIGNATURE_SIZE));
+    assert!(bytes.length() == SIGNATURE_NUM_BYTES, std::error::invalid_argument(E_WRONG_SIGNATURE_SIZE));
     Signature { bytes }
 }
 
@@ -620,7 +620,7 @@ Derives the Aptos-specific authentication key of the given Ed25519 public key.
fun public_key_bytes_to_authentication_key(pk_bytes: vector<u8>): vector<u8> {
-    std::vector::push_back(&mut pk_bytes, SIGNATURE_SCHEME_ID);
+    pk_bytes.push_back(SIGNATURE_SCHEME_ID);
     std::hash::sha3_256(pk_bytes)
 }
 
diff --git a/aptos-move/framework/aptos-stdlib/doc/federated_keyless.md b/aptos-move/framework/aptos-stdlib/doc/federated_keyless.md
new file mode 100644
index 0000000000000..d76943181eefa
--- /dev/null
+++ b/aptos-move/framework/aptos-stdlib/doc/federated_keyless.md
@@ -0,0 +1,215 @@
+
+
+
+# Module `0x1::federated_keyless`
+
+This module implements the Federated Keyless authentication scheme.
+
+
+- [Struct `PublicKey`](#0x1_federated_keyless_PublicKey)
+- [Constants](#@Constants_0)
+- [Function `new_public_key_from_bytes`](#0x1_federated_keyless_new_public_key_from_bytes)
+- [Function `deserialize_public_key`](#0x1_federated_keyless_deserialize_public_key)
+- [Function `new`](#0x1_federated_keyless_new)
+- [Function `get_jwk_address`](#0x1_federated_keyless_get_jwk_address)
+- [Function `get_keyless_public_key`](#0x1_federated_keyless_get_keyless_public_key)
+- [Specification](#@Specification_1)
+
+
use 0x1::bcs_stream;
+use 0x1::error;
+use 0x1::keyless;
+
+ + + + + +## Struct `PublicKey` + +An *unvalidated* any public key: not necessarily an elliptic curve point, just a sequence of 32 bytes + + +
struct PublicKey has copy, drop, store
+
+ + + +
+Fields + + +
+
+jwk_address: address +
+
+ +
+
+keyless_public_key: keyless::PublicKey +
+
+ +
+
+ + +
+ + + +## Constants + + + + +There are extra bytes in the input when deserializing a Federated Keyless public key. + + +
const E_INVALID_FEDERATED_KEYLESS_PUBLIC_KEY_EXTRA_BYTES: u64 = 1;
+
+ + + + + +## Function `new_public_key_from_bytes` + +Parses the input bytes into a keyless public key. + + +
public fun new_public_key_from_bytes(bytes: vector<u8>): federated_keyless::PublicKey
+
+ + + +
+Implementation + + +
public fun new_public_key_from_bytes(bytes: vector<u8>): PublicKey {
+    let stream = bcs_stream::new(bytes);
+    let pk = deserialize_public_key(&mut stream);
+    assert!(!bcs_stream::has_remaining(&mut stream), error::invalid_argument(E_INVALID_FEDERATED_KEYLESS_PUBLIC_KEY_EXTRA_BYTES));
+    pk
+}
+
+ + + +
+ + + +## Function `deserialize_public_key` + +Deserializes a Federated Keyless public key from a BCS stream. + + +
public fun deserialize_public_key(stream: &mut bcs_stream::BCSStream): federated_keyless::PublicKey
+
+ + + +
+Implementation + + +
public fun deserialize_public_key(stream: &mut bcs_stream::BCSStream): PublicKey {
+    let jwk_address = bcs_stream::deserialize_address(stream);
+    let keyless_public_key = keyless::deserialize_public_key(stream);
+    PublicKey { keyless_public_key, jwk_address }
+}
+
+ + + +
+ + + +## Function `new` + +Creates a new Federated Keyless public key from a keyless public key and a JWK address. + + +
public fun new(keyless_public_key: keyless::PublicKey, jwk_address: address): federated_keyless::PublicKey
+
+ + + +
+Implementation + + +
public fun new(keyless_public_key: keyless::PublicKey, jwk_address: address): PublicKey {
+    PublicKey { keyless_public_key, jwk_address }
+}
+
+ + + +
+ + + +## Function `get_jwk_address` + +Returns the identifier bytes of the public key + + +
public(friend) fun get_jwk_address(self: &federated_keyless::PublicKey): address
+
+ + + +
+Implementation + + +
friend fun get_jwk_address(self: &PublicKey): address {
+    self.jwk_address
+}
+
+ + + +
+ + + +## Function `get_keyless_public_key` + +Returns the keyless public key of the public key + + +
public(friend) fun get_keyless_public_key(self: &federated_keyless::PublicKey): keyless::PublicKey
+
+ + + +
+Implementation + + +
friend fun get_keyless_public_key(self: &PublicKey): keyless::PublicKey {
+    self.keyless_public_key
+}
+
+ + + +
+ + + +## Specification + + + +
pragma verify = false;
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/aptos-stdlib/doc/fixed_point64.md b/aptos-move/framework/aptos-stdlib/doc/fixed_point64.md index 22571462f6c30..f1ef1361476c6 100644 --- a/aptos-move/framework/aptos-stdlib/doc/fixed_point64.md +++ b/aptos-move/framework/aptos-stdlib/doc/fixed_point64.md @@ -144,22 +144,22 @@ The multiplied value would be too large to be held in a u128 - + -Abort code on calculation result is negative. +The computed ratio when converting to a FixedPoint64 would be unrepresentable -
const ENEGATIVE_RESULT: u64 = 65542;
+
const ERATIO_OUT_OF_RANGE: u64 = 131077;
 
- + -The computed ratio when converting to a FixedPoint64 would be unrepresentable +Abort code on calculation result is negative. -
const ERATIO_OUT_OF_RANGE: u64 = 131077;
+
const ENEGATIVE_RESULT: u64 = 65542;
 
@@ -168,10 +168,10 @@ The computed ratio when converting to a sub(x: fixed_point64::FixedPoint64, y: fixed_point64::FixedPoint64): fixed_point64::FixedPoint64 +
public fun sub(self: fixed_point64::FixedPoint64, y: fixed_point64::FixedPoint64): fixed_point64::FixedPoint64
 
@@ -180,9 +180,9 @@ Returns x - y. x must be not less than y. Implementation -
public fun sub(x: FixedPoint64, y: FixedPoint64): FixedPoint64 {
-    let x_raw = get_raw_value(x);
-    let y_raw = get_raw_value(y);
+
public fun sub(self: FixedPoint64, y: FixedPoint64): FixedPoint64 {
+    let x_raw = self.get_raw_value();
+    let y_raw = y.get_raw_value();
     assert!(x_raw >= y_raw, ENEGATIVE_RESULT);
     create_from_raw_value(x_raw - y_raw)
 }
@@ -196,10 +196,10 @@ Returns x - y. x must be not less than y.
 
 ## Function `add`
 
-Returns x + y. The result cannot be greater than MAX_U128.
+Returns self + y. The result cannot be greater than MAX_U128.
 
 
-
public fun add(x: fixed_point64::FixedPoint64, y: fixed_point64::FixedPoint64): fixed_point64::FixedPoint64
+
public fun add(self: fixed_point64::FixedPoint64, y: fixed_point64::FixedPoint64): fixed_point64::FixedPoint64
 
@@ -208,9 +208,9 @@ Returns x + y. The result cannot be greater than MAX_U128. Implementation -
public fun add(x: FixedPoint64, y: FixedPoint64): FixedPoint64 {
-    let x_raw = get_raw_value(x);
-    let y_raw = get_raw_value(y);
+
public fun add(self: FixedPoint64, y: FixedPoint64): FixedPoint64 {
+    let x_raw = self.get_raw_value();
+    let y_raw = y.get_raw_value();
     let result = (x_raw as u256) + (y_raw as u256);
     assert!(result <= MAX_U128, ERATIO_OUT_OF_RANGE);
     create_from_raw_value((result as u128))
@@ -372,7 +372,7 @@ adding or subtracting FixedPoint64 values, can be done using the raw
 values directly.
 
 
-
public fun get_raw_value(num: fixed_point64::FixedPoint64): u128
+
public fun get_raw_value(self: fixed_point64::FixedPoint64): u128
 
@@ -381,8 +381,8 @@ values directly. Implementation -
public fun get_raw_value(num: FixedPoint64): u128 {
-    num.value
+
public fun get_raw_value(self: FixedPoint64): u128 {
+    self.value
 }
 
@@ -397,7 +397,7 @@ values directly. Returns true if the ratio is zero. -
public fun is_zero(num: fixed_point64::FixedPoint64): bool
+
public fun is_zero(self: fixed_point64::FixedPoint64): bool
 
@@ -406,8 +406,8 @@ Returns true if the ratio is zero. Implementation -
public fun is_zero(num: FixedPoint64): bool {
-    num.value == 0
+
public fun is_zero(self: FixedPoint64): bool {
+    self.value == 0
 }
 
@@ -477,10 +477,10 @@ Returns the larger of the two FixedPoint64 numbers. ## Function `less_or_equal` -Returns true if num1 <= num2 +Returns true if self <= num2 -
public fun less_or_equal(num1: fixed_point64::FixedPoint64, num2: fixed_point64::FixedPoint64): bool
+
public fun less_or_equal(self: fixed_point64::FixedPoint64, num2: fixed_point64::FixedPoint64): bool
 
@@ -489,8 +489,8 @@ Returns true if num1 <= num2 Implementation -
public fun less_or_equal(num1: FixedPoint64, num2: FixedPoint64): bool {
-    num1.value <= num2.value
+
public fun less_or_equal(self: FixedPoint64, num2: FixedPoint64): bool {
+    self.value <= num2.value
 }
 
@@ -502,10 +502,10 @@ Returns true if num1 <= num2 ## Function `less` -Returns true if num1 < num2 +Returns true if self < num2 -
public fun less(num1: fixed_point64::FixedPoint64, num2: fixed_point64::FixedPoint64): bool
+
public fun less(self: fixed_point64::FixedPoint64, num2: fixed_point64::FixedPoint64): bool
 
@@ -514,8 +514,8 @@ Returns true if num1 < num2 Implementation -
public fun less(num1: FixedPoint64, num2: FixedPoint64): bool {
-    num1.value < num2.value
+
public fun less(self: FixedPoint64, num2: FixedPoint64): bool {
+    self.value < num2.value
 }
 
@@ -527,10 +527,10 @@ Returns true if num1 < num2 ## Function `greater_or_equal` -Returns true if num1 >= num2 +Returns true if self >= num2 -
public fun greater_or_equal(num1: fixed_point64::FixedPoint64, num2: fixed_point64::FixedPoint64): bool
+
public fun greater_or_equal(self: fixed_point64::FixedPoint64, num2: fixed_point64::FixedPoint64): bool
 
@@ -539,8 +539,8 @@ Returns true if num1 >= num2 Implementation -
public fun greater_or_equal(num1: FixedPoint64, num2: FixedPoint64): bool {
-    num1.value >= num2.value
+
public fun greater_or_equal(self: FixedPoint64, num2: FixedPoint64): bool {
+    self.value >= num2.value
 }
 
@@ -552,10 +552,10 @@ Returns true if num1 >= num2 ## Function `greater` -Returns true if num1 > num2 +Returns true if self > num2 -
public fun greater(num1: fixed_point64::FixedPoint64, num2: fixed_point64::FixedPoint64): bool
+
public fun greater(self: fixed_point64::FixedPoint64, num2: fixed_point64::FixedPoint64): bool
 
@@ -564,8 +564,8 @@ Returns true if num1 > num2 Implementation -
public fun greater(num1: FixedPoint64, num2: FixedPoint64): bool {
-    num1.value > num2.value
+
public fun greater(self: FixedPoint64, num2: FixedPoint64): bool {
+    self.value > num2.value
 }
 
@@ -577,10 +577,10 @@ Returns true if num1 > num2 ## Function `equal` -Returns true if num1 = num2 +Returns true if self = num2 -
public fun equal(num1: fixed_point64::FixedPoint64, num2: fixed_point64::FixedPoint64): bool
+
public fun equal(self: fixed_point64::FixedPoint64, num2: fixed_point64::FixedPoint64): bool
 
@@ -589,8 +589,8 @@ Returns true if num1 = num2 Implementation -
public fun equal(num1: FixedPoint64, num2: FixedPoint64): bool {
-    num1.value == num2.value
+
public fun equal(self: FixedPoint64, num2: FixedPoint64): bool {
+    self.value == num2.value
 }
 
@@ -602,10 +602,10 @@ Returns true if num1 = num2 ## Function `almost_equal` -Returns true if num1 almost equals to num2, which means abs(num1-num2) <= precision +Returns true if self almost equals to num2, which means abs(num1-num2) <= precision -
public fun almost_equal(num1: fixed_point64::FixedPoint64, num2: fixed_point64::FixedPoint64, precision: fixed_point64::FixedPoint64): bool
+
public fun almost_equal(self: fixed_point64::FixedPoint64, num2: fixed_point64::FixedPoint64, precision: fixed_point64::FixedPoint64): bool
 
@@ -614,11 +614,11 @@ Returns true if num1 almost equals to num2, which means abs(num1-num2) <= precis Implementation -
public fun almost_equal(num1: FixedPoint64, num2: FixedPoint64, precision: FixedPoint64): bool {
-    if (num1.value > num2.value) {
-        (num1.value - num2.value <= precision.value)
+
public fun almost_equal(self: FixedPoint64, num2: FixedPoint64, precision: FixedPoint64): bool {
+    if (self.value > num2.value) {
+        (self.value - num2.value <= precision.value)
     } else {
-        (num2.value - num1.value <= precision.value)
+        (num2.value - self.value <= precision.value)
     }
 }
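
A test-only sketch of the comparison helpers with their new self receivers (assuming create_from_rational and create_from_raw_value as in aptos_std::fixed_point64):

```move
#[test]
fun comparison_sketch() {
    let third = fixed_point64::create_from_rational(1, 3);
    let two_thirds = fixed_point64::create_from_rational(2, 3);
    assert!(third.less(two_thirds) && two_thirds.greater(third), 0);
    // almost_equal compares against an explicit precision in raw units.
    let eps = fixed_point64::create_from_raw_value(1);
    assert!(third.almost_equal(third, eps), 1);
}
```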
 
@@ -661,7 +661,7 @@ Create a fixedpoint value from a u128 value. Returns the largest integer less than or equal to a given number. -
public fun floor(num: fixed_point64::FixedPoint64): u128
+
public fun floor(self: fixed_point64::FixedPoint64): u128
 
@@ -670,8 +670,8 @@ Returns the largest integer less than or equal to a given number. Implementation -
public fun floor(num: FixedPoint64): u128 {
-    num.value >> 64
+
public fun floor(self: FixedPoint64): u128 {
+    self.value >> 64
 }
 
@@ -686,7 +686,7 @@ Returns the largest integer less than or equal to a given number. Rounds up the given FixedPoint64 to the next largest integer. -
public fun ceil(num: fixed_point64::FixedPoint64): u128
+
public fun ceil(self: fixed_point64::FixedPoint64): u128
 
@@ -695,9 +695,9 @@ Rounds up the given FixedPoint64 to the next largest integer. Implementation -
public fun ceil(num: FixedPoint64): u128 {
-    let floored_num = floor(num) << 64;
-    if (num.value == floored_num) {
+
public fun ceil(self: FixedPoint64): u128 {
+    let floored_num = self.floor() << 64;
+    if (self.value == floored_num) {
         return floored_num >> 64
     };
     let val = ((floored_num as u256) + (1 << 64));
@@ -716,7 +716,7 @@ Rounds up the given FixedPoint64 to the next largest integer.
 Returns the value of a FixedPoint64 to the nearest integer.
 
 
-
public fun round(num: fixed_point64::FixedPoint64): u128
+
public fun round(self: fixed_point64::FixedPoint64): u128
 
@@ -725,13 +725,13 @@ Returns the value of a FixedPoint64 to the nearest integer. Implementation -
public fun round(num: FixedPoint64): u128 {
-    let floored_num = floor(num) << 64;
+
public fun round(self: FixedPoint64): u128 {
+    let floored_num = self.floor() << 64;
     let boundary = floored_num + ((1 << 64) / 2);
-    if (num.value < boundary) {
+    if (self.value < boundary) {
         floored_num >> 64
     } else {
-        ceil(num)
+        self.ceil()
     }
 }
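
A rounding sketch tying floor, ceil, and round together (same assumptions as the comparison sketch above; 2.5 sits exactly on the boundary, and the implementation sends boundary values to ceil):

```move
#[test]
fun rounding_sketch() {
    let x = fixed_point64::create_from_rational(5, 2); // 2.5
    assert!(x.floor() == 2, 0);
    assert!(x.ceil() == 3, 1);
    assert!(x.round() == 3, 2); // boundary value rounds up
}
```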
 
@@ -757,15 +757,15 @@ Returns the value of a FixedPoint64 to the nearest integer. ### Function `sub` -
public fun sub(x: fixed_point64::FixedPoint64, y: fixed_point64::FixedPoint64): fixed_point64::FixedPoint64
+
public fun sub(self: fixed_point64::FixedPoint64, y: fixed_point64::FixedPoint64): fixed_point64::FixedPoint64
 
pragma opaque;
-aborts_if x.value < y.value with ENEGATIVE_RESULT;
-ensures result.value == x.value - y.value;
+aborts_if self.value < y.value with ENEGATIVE_RESULT;
+ensures result.value == self.value - y.value;
 
@@ -775,15 +775,15 @@ Returns the value of a FixedPoint64 to the nearest integer. ### Function `add` -
public fun add(x: fixed_point64::FixedPoint64, y: fixed_point64::FixedPoint64): fixed_point64::FixedPoint64
+
public fun add(self: fixed_point64::FixedPoint64, y: fixed_point64::FixedPoint64): fixed_point64::FixedPoint64
 
pragma opaque;
-aborts_if (x.value as u256) + (y.value as u256) > MAX_U128 with ERATIO_OUT_OF_RANGE;
-ensures result.value == x.value + y.value;
+aborts_if (self.value as u256) + (y.value as u256) > MAX_U128 with ERATIO_OUT_OF_RANGE;
+ensures result.value == self.value + y.value;
 
@@ -1010,7 +1010,7 @@ Returns the value of a FixedPoint64 to the nearest integer. ### Function `less_or_equal` -
public fun less_or_equal(num1: fixed_point64::FixedPoint64, num2: fixed_point64::FixedPoint64): bool
+
public fun less_or_equal(self: fixed_point64::FixedPoint64, num2: fixed_point64::FixedPoint64): bool
 
@@ -1018,7 +1018,7 @@ Returns the value of a FixedPoint64 to the nearest integer.
pragma opaque;
 aborts_if false;
-ensures result == spec_less_or_equal(num1, num2);
+ensures result == spec_less_or_equal(self, num2);
 
@@ -1027,8 +1027,8 @@ Returns the value of a FixedPoint64 to the nearest integer. -
fun spec_less_or_equal(num1: FixedPoint64, num2: FixedPoint64): bool {
-   num1.value <= num2.value
+
fun spec_less_or_equal(self: FixedPoint64, num2: FixedPoint64): bool {
+   self.value <= num2.value
 }
 
@@ -1039,7 +1039,7 @@ Returns the value of a FixedPoint64 to the nearest integer. ### Function `less` -
public fun less(num1: fixed_point64::FixedPoint64, num2: fixed_point64::FixedPoint64): bool
+
public fun less(self: fixed_point64::FixedPoint64, num2: fixed_point64::FixedPoint64): bool
 
@@ -1047,7 +1047,7 @@ Returns the value of a FixedPoint64 to the nearest integer.
pragma opaque;
 aborts_if false;
-ensures result == spec_less(num1, num2);
+ensures result == spec_less(self, num2);
 
@@ -1056,8 +1056,8 @@ Returns the value of a FixedPoint64 to the nearest integer. -
fun spec_less(num1: FixedPoint64, num2: FixedPoint64): bool {
-   num1.value < num2.value
+
fun spec_less(self: FixedPoint64, num2: FixedPoint64): bool {
+   self.value < num2.value
 }
 
@@ -1068,7 +1068,7 @@ Returns the value of a FixedPoint64 to the nearest integer. ### Function `greater_or_equal` -
public fun greater_or_equal(num1: fixed_point64::FixedPoint64, num2: fixed_point64::FixedPoint64): bool
+
public fun greater_or_equal(self: fixed_point64::FixedPoint64, num2: fixed_point64::FixedPoint64): bool
 
@@ -1076,7 +1076,7 @@ Returns the value of a FixedPoint64 to the nearest integer.
pragma opaque;
 aborts_if false;
-ensures result == spec_greater_or_equal(num1, num2);
+ensures result == spec_greater_or_equal(self, num2);
 
@@ -1085,8 +1085,8 @@ Returns the value of a FixedPoint64 to the nearest integer. -
fun spec_greater_or_equal(num1: FixedPoint64, num2: FixedPoint64): bool {
-   num1.value >= num2.value
+
fun spec_greater_or_equal(self: FixedPoint64, num2: FixedPoint64): bool {
+   self.value >= num2.value
 }
 
@@ -1097,7 +1097,7 @@ Returns the value of a FixedPoint64 to the nearest integer. ### Function `greater` -
public fun greater(num1: fixed_point64::FixedPoint64, num2: fixed_point64::FixedPoint64): bool
+
public fun greater(self: fixed_point64::FixedPoint64, num2: fixed_point64::FixedPoint64): bool
 
@@ -1105,7 +1105,7 @@ Returns the value of a FixedPoint64 to the nearest integer.
pragma opaque;
 aborts_if false;
-ensures result == spec_greater(num1, num2);
+ensures result == spec_greater(self, num2);
 
@@ -1114,8 +1114,8 @@ Returns the value of a FixedPoint64 to the nearest integer. -
fun spec_greater(num1: FixedPoint64, num2: FixedPoint64): bool {
-   num1.value > num2.value
+
fun spec_greater(self: FixedPoint64, num2: FixedPoint64): bool {
+   self.value > num2.value
 }
 
@@ -1126,7 +1126,7 @@ Returns the value of a FixedPoint64 to the nearest integer. ### Function `equal` -
public fun equal(num1: fixed_point64::FixedPoint64, num2: fixed_point64::FixedPoint64): bool
+
public fun equal(self: fixed_point64::FixedPoint64, num2: fixed_point64::FixedPoint64): bool
 
@@ -1134,7 +1134,7 @@ Returns the value of a FixedPoint64 to the nearest integer.
pragma opaque;
 aborts_if false;
-ensures result == spec_equal(num1, num2);
+ensures result == spec_equal(self, num2);
 
@@ -1143,8 +1143,8 @@ Returns the value of a FixedPoint64 to the nearest integer. -
fun spec_equal(num1: FixedPoint64, num2: FixedPoint64): bool {
-   num1.value == num2.value
+
fun spec_equal(self: FixedPoint64, num2: FixedPoint64): bool {
+   self.value == num2.value
 }
 
@@ -1155,7 +1155,7 @@ Returns the value of a FixedPoint64 to the nearest integer. ### Function `almost_equal` -
public fun almost_equal(num1: fixed_point64::FixedPoint64, num2: fixed_point64::FixedPoint64, precision: fixed_point64::FixedPoint64): bool
+
public fun almost_equal(self: fixed_point64::FixedPoint64, num2: fixed_point64::FixedPoint64, precision: fixed_point64::FixedPoint64): bool
 
@@ -1163,7 +1163,7 @@ Returns the value of a FixedPoint64 to the nearest integer.
pragma opaque;
 aborts_if false;
-ensures result == spec_almost_equal(num1, num2, precision);
+ensures result == spec_almost_equal(self, num2, precision);
 
@@ -1172,11 +1172,11 @@ Returns the value of a FixedPoint64 to the nearest integer. -
fun spec_almost_equal(num1: FixedPoint64, num2: FixedPoint64, precision: FixedPoint64): bool {
-   if (num1.value > num2.value) {
-       (num1.value - num2.value <= precision.value)
+
fun spec_almost_equal(self: FixedPoint64, num2: FixedPoint64, precision: FixedPoint64): bool {
+   if (self.value > num2.value) {
+       (self.value - num2.value <= precision.value)
    } else {
-       (num2.value - num1.value <= precision.value)
+       (num2.value - self.value <= precision.value)
    }
 }
 
@@ -1230,7 +1230,7 @@ Returns the value of a FixedPoint64 to the nearest integer. ### Function `floor` -
public fun floor(num: fixed_point64::FixedPoint64): u128
+
public fun floor(self: fixed_point64::FixedPoint64): u128
 
@@ -1238,7 +1238,7 @@ Returns the value of a FixedPoint64 to the nearest integer.
pragma opaque;
 aborts_if false;
-ensures result == spec_floor(num);
+ensures result == spec_floor(self);
 
@@ -1247,12 +1247,12 @@ Returns the value of a FixedPoint64 to the nearest integer. -
fun spec_floor(val: FixedPoint64): u128 {
-   let fractional = val.value % (1 << 64);
+
fun spec_floor(self: FixedPoint64): u128 {
+   let fractional = self.value % (1 << 64);
    if (fractional == 0) {
-       val.value >> 64
+       self.value >> 64
    } else {
-       (val.value - fractional) >> 64
+       (self.value - fractional) >> 64
    }
 }
 
@@ -1264,7 +1264,7 @@ Returns the value of a FixedPoint64 to the nearest integer. ### Function `ceil` -
public fun ceil(num: fixed_point64::FixedPoint64): u128
+
public fun ceil(self: fixed_point64::FixedPoint64): u128
 
@@ -1273,7 +1273,7 @@ Returns the value of a FixedPoint64 to the nearest integer.
pragma verify_duration_estimate = 1000;
 pragma opaque;
 aborts_if false;
-ensures result == spec_ceil(num);
+ensures result == spec_ceil(self);
 
@@ -1282,13 +1282,13 @@ Returns the value of a FixedPoint64 to the nearest integer. -
fun spec_ceil(val: FixedPoint64): u128 {
-   let fractional = val.value % (1 << 64);
+
fun spec_ceil(self: FixedPoint64): u128 {
+   let fractional = self.value % (1 << 64);
    let one = 1 << 64;
    if (fractional == 0) {
-       val.value >> 64
+       self.value >> 64
    } else {
-       (val.value - fractional + one) >> 64
+       (self.value - fractional + one) >> 64
    }
 }
 
@@ -1300,7 +1300,7 @@ Returns the value of a FixedPoint64 to the nearest integer. ### Function `round` -
public fun round(num: fixed_point64::FixedPoint64): u128
+
public fun round(self: fixed_point64::FixedPoint64): u128
 
@@ -1308,7 +1308,7 @@ Returns the value of a FixedPoint64 to the nearest integer.
pragma opaque;
 aborts_if false;
-ensures result == spec_round(num);
+ensures result == spec_round(self);
 
@@ -1317,14 +1317,14 @@ Returns the value of a FixedPoint64 to the nearest integer. -
fun spec_round(val: FixedPoint64): u128 {
-   let fractional = val.value % (1 << 64);
+
fun spec_round(self: FixedPoint64): u128 {
+   let fractional = self.value % (1 << 64);
    let boundary = (1 << 64) / 2;
    let one = 1 << 64;
    if (fractional < boundary) {
-       (val.value - fractional) >> 64
+       (self.value - fractional) >> 64
    } else {
-       (val.value - fractional + one) >> 64
+       (self.value - fractional + one) >> 64
    }
 }
 
diff --git a/aptos-move/framework/aptos-stdlib/doc/from_bcs.md b/aptos-move/framework/aptos-stdlib/doc/from_bcs.md
index f0a71b713d1bf..b537e76008d7d 100644
--- a/aptos-move/framework/aptos-stdlib/doc/from_bcs.md
+++ b/aptos-move/framework/aptos-stdlib/doc/from_bcs.md
@@ -5,7 +5,7 @@ This module provides a number of functions to convert _primitive_ types from
 their representation in std::bcs to values. This is the opposite of bcs::to_bytes.
 Note that it is not safe to define a generic public from_bytes
-function because this can violate implicit struct invariants, therefore only primitive types are offerred. If
+function because this can violate implicit struct invariants, therefore only primitive types are offered. If
 a general conversion back-and-forth is needed, consider the aptos_std::Any type which preserves invariants.

 Example:
@@ -287,7 +287,7 @@ UTF8 check failed in conversion from bytes to string
public fun to_string(v: vector<u8>): String {
     // To make this safe, we need to evaluate the utf8 invariant.
     let s = from_bytes<String>(v);
-    assert!(string::internal_check_utf8(string::bytes(&s)), EINVALID_UTF8);
+    assert!(string::internal_check_utf8(s.bytes()), EINVALID_UTF8);
     s
 }
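
A round-trip sketch for to_string (test-only; std::bcs produces the BCS encoding of a String, a ULEB128 length prefix followed by the UTF-8 bytes):

```move
#[test]
fun to_string_roundtrip_sketch() {
    // assumes `use std::bcs;`, `use std::string;`, `use aptos_std::from_bcs;`
    let s = string::utf8(b"hello");
    let bytes = bcs::to_bytes(&s);
    assert!(from_bcs::to_string(bytes) == s, 0);
}
```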
 
@@ -306,6 +306,8 @@ Note that this function does not put any constraint on T. If code uses this function to
 deserialize a linear value, it is their responsibility that the data they deserialize is owned.
 
+The function aborts if T contains a signer.
+
public(friend) fun from_bytes<T>(bytes: vector<u8>): T
 
@@ -316,7 +318,7 @@ owned. Implementation -
public(friend) native fun from_bytes<T>(bytes: vector<u8>): T;
+
native friend fun from_bytes<T>(bytes: vector<u8>): T;
 
diff --git a/aptos-move/framework/aptos-stdlib/doc/keyless.md b/aptos-move/framework/aptos-stdlib/doc/keyless.md
new file mode 100644
index 0000000000000..63ba0e2d879a0
--- /dev/null
+++ b/aptos-move/framework/aptos-stdlib/doc/keyless.md
@@ -0,0 +1,257 @@
+
+
+
+# Module `0x1::keyless`
+
+This module implements the Keyless authentication scheme.
+
+
+- [Struct `PublicKey`](#0x1_keyless_PublicKey)
+- [Constants](#@Constants_0)
+- [Function `new_public_key_from_bytes`](#0x1_keyless_new_public_key_from_bytes)
+- [Function `deserialize_public_key`](#0x1_keyless_deserialize_public_key)
+- [Function `new`](#0x1_keyless_new)
+- [Function `get_iss`](#0x1_keyless_get_iss)
+- [Function `get_idc`](#0x1_keyless_get_idc)
+- [Specification](#@Specification_1)
+
+
use 0x1::bcs_stream;
+use 0x1::error;
+use 0x1::string;
+
+ + + + + +## Struct `PublicKey` + +An *unvalidated* any public key: not necessarily an elliptic curve point, just a sequence of 32 bytes + + +
struct PublicKey has copy, drop, store
+
+ + + +
+Fields + + +
+
+iss: string::String +
+
+ +
+
+idc: vector<u8> +
+
+ +
+
+ + +
+ + + +## Constants + + + + +The length of the identifier commitment bytes in a Keyless public key is invalid. + + +
const E_INVALID_ID_COMMITMENT_BYTES_LENGTH: u64 = 2;
+
+ + + + + +The length of the issuer string in a Keyless public key is invalid. + + +
const E_INVALID_ISSUER_UTF8_BYTES_LENGTH: u64 = 3;
+
+ + + + + +There are extra bytes in the input when deserializing a Keyless public key. + + +
const E_INVALID_KEYLESS_PUBLIC_KEY_EXTRA_BYTES: u64 = 1;
+
+ + + + + +The length of the identifier commitment bytes in a Keyless public key. + + +
const ID_COMMITMENT_BYTES_LENGTH: u64 = 32;
+
+ + + + + +The maximum length of the issuer string in bytes in a Keyless public key. + + +
const MAX_ISSUER_UTF8_BYTES_LENGTH: u64 = 120;
+
+ + + + + +## Function `new_public_key_from_bytes` + +Parses the input bytes into a keyless public key. + + +
public fun new_public_key_from_bytes(bytes: vector<u8>): keyless::PublicKey
+
+ + + +
+Implementation + + +
public fun new_public_key_from_bytes(bytes: vector<u8>): PublicKey {
+    let stream = bcs_stream::new(bytes);
+    let key = deserialize_public_key(&mut stream);
+    assert!(!bcs_stream::has_remaining(&mut stream), error::invalid_argument(E_INVALID_KEYLESS_PUBLIC_KEY_EXTRA_BYTES));
+    key
+}
+
+ + + +
+ + + +## Function `deserialize_public_key` + +Deserializes a keyless public key from a BCS stream. + + +
public fun deserialize_public_key(stream: &mut bcs_stream::BCSStream): keyless::PublicKey
+
+ + + +
+Implementation + + +
public fun deserialize_public_key(stream: &mut bcs_stream::BCSStream): PublicKey {
+    let iss = bcs_stream::deserialize_string(stream);
+    let idc = bcs_stream::deserialize_vector(stream, |x| deserialize_u8(x));
+    new(iss, idc)
+}
+
+ + + +
+ + + +## Function `new` + +Creates a new keyless public key from an issuer string and an identifier bytes. + + +
public fun new(iss: string::String, idc: vector<u8>): keyless::PublicKey
+
+ + + +
+Implementation + + +
public fun new(iss: String, idc: vector<u8>): PublicKey {
+    assert!(string::bytes(&iss).length() <= MAX_ISSUER_UTF8_BYTES_LENGTH, error::invalid_argument(E_INVALID_ISSUER_UTF8_BYTES_LENGTH));
+    assert!(idc.length() == ID_COMMITMENT_BYTES_LENGTH, error::invalid_argument(E_INVALID_ID_COMMITMENT_BYTES_LENGTH));
+    PublicKey { iss, idc }
+}
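
A construction sketch for the checks above (illustrative only; the issuer string and the 32-byte identifier commitment are placeholder values):

```move
#[test]
fun new_public_key_sketch() {
    let iss = std::string::utf8(b"https://accounts.google.com");
    // Exactly ID_COMMITMENT_BYTES_LENGTH (32) bytes; any other length aborts
    // with E_INVALID_ID_COMMITMENT_BYTES_LENGTH.
    let idc = x"0000000000000000000000000000000000000000000000000000000000000000";
    let _pk = new(iss, idc);
}
```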
+
+ + + +
+ + + +## Function `get_iss` + +Returns the issuer string of the public key + + +
public(friend) fun get_iss(self: &keyless::PublicKey): string::String
+
+ + + +
+Implementation + + +
friend fun get_iss(self: &PublicKey): String {
+    self.iss
+}
+
+ + + +
+ + + +## Function `get_idc` + +Returns the identifier bytes of the public key + + +
public(friend) fun get_idc(self: &keyless::PublicKey): vector<u8>
+
+ + + +
+Implementation + + +
friend fun get_idc(self: &PublicKey): vector<u8> {
+    self.idc
+}
+
+ + + +
+ + + +## Specification + + + +
pragma verify = false;
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/aptos-stdlib/doc/math128.md b/aptos-move/framework/aptos-stdlib/doc/math128.md index 0160568fc30b7..616a353e90b90 100644 --- a/aptos-move/framework/aptos-stdlib/doc/math128.md +++ b/aptos-move/framework/aptos-stdlib/doc/math128.md @@ -11,6 +11,7 @@ Standard math utilities missing in the Move Language. - [Function `min`](#0x1_math128_min) - [Function `average`](#0x1_math128_average) - [Function `gcd`](#0x1_math128_gcd) +- [Function `lcm`](#0x1_math128_lcm) - [Function `mul_div`](#0x1_math128_mul_div) - [Function `clamp`](#0x1_math128_clamp) - [Function `pow`](#0x1_math128_pow) @@ -159,6 +160,35 @@ Return greatest common divisor of a & b, via the Eucli + + + + +## Function `lcm` + +Return least common multiple of a & b + + +
public fun lcm(a: u128, b: u128): u128
+
+ + + +
+Implementation + + +
public inline fun lcm(a: u128, b: u128): u128 {
+    if (a == 0 || b == 0) {
+        0
+    } else {
+        a / gcd(a, b) * b
+    }
+}
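
A couple of spot checks for the new lcm; note that a / gcd(a, b) * b divides before multiplying, which keeps the intermediate value small, and the zero case is defined as 0:

```move
#[test]
fun lcm_sketch() {
    // assumes `use aptos_std::math128;`
    assert!(math128::lcm(4, 6) == 12, 0);
    assert!(math128::lcm(7, 13) == 91, 1);
    assert!(math128::lcm(0, 5) == 0, 2);
}
```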
+
+ + +
@@ -236,10 +266,10 @@ Return the value of n raised to power e let p = 1; while (e > 1) { if (e % 2 == 1) { - p = p * n; + p *= n; }; - e = e / 2; - n = n * n; + e /= 2; + n *= n; }; p * n } @@ -273,10 +303,10 @@ Returns floor(log2(x)) let n = 64; while (n > 0) { if (x >= (1 << n)) { - x = x >> n; - res = res + n; + x >>= n; + res += n; }; - n = n >> 1; + n >>= 1; }; res } @@ -305,9 +335,9 @@ Returns floor(log2(x)) let integer_part = floor_log2(x); // Normalize x to [1, 2) in fixed point 32. if (x >= 1 << 32) { - x = x >> (integer_part - 32); + x >>= (integer_part - 32); } else { - x = x << (32 - integer_part); + x <<= (32 - integer_part); }; let frac = 0; let delta = 1 << 31; @@ -317,8 +347,8 @@ Returns floor(log2(x)) x = (x * x) >> 32; // x is now in [1, 4) // if x in [2, 4) then log x = 1 + log (x / 2) - if (x >= (2 << 32)) { frac = frac + delta; x = x >> 1; }; - delta = delta >> 1; + if (x >= (2 << 32)) { frac += delta; x >>= 1; }; + delta >>= 1; }; fixed_point32::create_from_raw_value (((integer_part as u64) << 32) + frac) } @@ -347,9 +377,9 @@ Returns floor(log2(x)) let integer_part = floor_log2(x); // Normalize x to [1, 2) in fixed point 63. To ensure x is smaller then 1<<64 if (x >= 1 << 63) { - x = x >> (integer_part - 63); + x >>= (integer_part - 63); } else { - x = x << (63 - integer_part); + x <<= (63 - integer_part); }; let frac = 0; let delta = 1 << 63; @@ -359,8 +389,8 @@ Returns floor(log2(x)) x = (x * x) >> 63; // x is now in [1, 4) // if x in [2, 4) then log x = 1 + log (x / 2) - if (x >= (2 << 63)) { frac = frac + delta; x = x >> 1; }; - delta = delta >> 1; + if (x >= (2 << 63)) { frac += delta; x >>= 1; }; + delta >>= 1; }; fixed_point64::create_from_raw_value (((integer_part as u128) << 64) + frac) } diff --git a/aptos-move/framework/aptos-stdlib/doc/math64.md b/aptos-move/framework/aptos-stdlib/doc/math64.md index b40a67a25860f..f20821ac6fe41 100644 --- a/aptos-move/framework/aptos-stdlib/doc/math64.md +++ b/aptos-move/framework/aptos-stdlib/doc/math64.md @@ -11,6 +11,7 @@ Standard math utilities missing in the Move Language. - [Function `min`](#0x1_math64_min) - [Function `average`](#0x1_math64_average) - [Function `gcd`](#0x1_math64_gcd) +- [Function `lcm`](#0x1_math64_lcm) - [Function `mul_div`](#0x1_math64_mul_div) - [Function `clamp`](#0x1_math64_clamp) - [Function `pow`](#0x1_math64_pow) @@ -157,6 +158,35 @@ Return greatest common divisor of a & b, via the Eucli + + + + +## Function `lcm` + +Returns least common multiple of a & b. + + +
public fun lcm(a: u64, b: u64): u64
+
+ + + +
+Implementation + + +
public inline fun lcm(a: u64, b: u64): u64 {
+    if (a == 0 || b == 0) {
+        0
+    } else {
+        a / gcd(a, b) * b
+    }
+}
+
+ + +
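+A small sketch of the zero case and of why the division happens first (illustrative values;
+a * b may exceed u64 even when the lcm itself fits):
+
+#[test]
+fun lcm_u64_example() {
+    let big: u64 = 1 << 40;
+    // big * big = 2^80 would overflow u64, but big / gcd(big, big) * big = 2^40 does not.
+    assert!(aptos_std::math64::lcm(big, big) == big, 0);
+    assert!(aptos_std::math64::lcm(big, 0) == 0, 1);
+}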
@@ -234,10 +264,10 @@ Return the value of n raised to power e let p = 1; while (e > 1) { if (e % 2 == 1) { - p = p * n; + p *= n; }; - e = e / 2; - n = n * n; + e /= 2; + n *= n; }; p * n } @@ -271,10 +301,10 @@ Returns floor(lg2(x)) let n = 32; while (n > 0) { if (x >= (1 << n)) { - x = x >> n; - res = res + n; + x >>= n; + res += n; }; - n = n >> 1; + n >>= 1; }; res } @@ -315,8 +345,8 @@ Returns floor(lg2(x)) y = (y * y) >> 32; // x is now in [1, 4) // if x in [2, 4) then log x = 1 + log (x / 2) - if (y >= (2 << 32)) { frac = frac + delta; y = y >> 1; }; - delta = delta >> 1; + if (y >= (2 << 32)) { frac += delta; y >>= 1; }; + delta >>= 1; }; fixed_point32::create_from_raw_value (((integer_part as u64) << 32) + frac) } diff --git a/aptos-move/framework/aptos-stdlib/doc/math_fixed.md b/aptos-move/framework/aptos-stdlib/doc/math_fixed.md index e4530d7222d2d..ef1715e565068 100644 --- a/aptos-move/framework/aptos-stdlib/doc/math_fixed.md +++ b/aptos-move/framework/aptos-stdlib/doc/math_fixed.md @@ -75,7 +75,7 @@ Square root of fixed point number
public fun sqrt(x: FixedPoint32): FixedPoint32 {
-    let y = (fixed_point32::get_raw_value(x) as u128);
+    let y = (x.get_raw_value() as u128);
     fixed_point32::create_from_raw_value((math128::sqrt(y << 32) as u64))
 }
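A quick sanity check of the fixed-point square root (an illustrative test sketch, assuming
std::fixed_point32::create_from_rational and get_raw_value are in scope as usual):

#[test]
fun sqrt_example() {
    let four = std::fixed_point32::create_from_rational(4, 1);
    // 4.0 is a perfect square, so the Q32.32 result is exactly 2.0, i.e. raw value 2 << 32.
    assert!(std::fixed_point32::get_raw_value(sqrt(four)) == (2 << 32), 0);
}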
 
@@ -101,7 +101,7 @@ Exponent function with a precision of 9 digits.
public fun exp(x: FixedPoint32): FixedPoint32 {
-    let raw_value = (fixed_point32::get_raw_value(x) as u128);
+    let raw_value = (x.get_raw_value() as u128);
     fixed_point32::create_from_raw_value((exp_raw(raw_value) as u64))
 }
 
@@ -128,7 +128,7 @@ is positive for all values of x.
public fun log2_plus_32(x: FixedPoint32): FixedPoint32 {
-    let raw_value = (fixed_point32::get_raw_value(x) as u128);
+    let raw_value = (x.get_raw_value() as u128);
     math128::log2(raw_value)
 }
 
@@ -153,8 +153,8 @@ is positive for all values of x.
public fun ln_plus_32ln2(x: FixedPoint32): FixedPoint32 {
-    let raw_value = (fixed_point32::get_raw_value(x) as u128);
-    let x = (fixed_point32::get_raw_value(math128::log2(raw_value)) as u128);
+    let raw_value = (x.get_raw_value() as u128);
+    let x = (math128::log2(raw_value).get_raw_value() as u128);
     fixed_point32::create_from_raw_value((x * LN2 >> 32 as u64))
 }
 
@@ -180,7 +180,7 @@ Integer power of a fixed point number
public fun pow(x: FixedPoint32, n: u64): FixedPoint32 {
-    let raw_value = (fixed_point32::get_raw_value(x) as u128);
+    let raw_value = (x.get_raw_value() as u128);
     fixed_point32::create_from_raw_value((pow_raw(raw_value, (n as u128)) as u64))
 }
 
@@ -206,9 +206,9 @@ Specialized function for x * y / z that omits intermediate shifting
public fun mul_div(x: FixedPoint32, y: FixedPoint32, z: FixedPoint32): FixedPoint32 {
-    let a = fixed_point32::get_raw_value(x);
-    let b = fixed_point32::get_raw_value(y);
-    let c = fixed_point32::get_raw_value(z);
+    let a = x.get_raw_value();
+    let b = y.get_raw_value();
+    let c = z.get_raw_value();
     fixed_point32::create_from_raw_value (math64::mul_div(a, b, c))
 }
 
@@ -248,7 +248,7 @@ Specialized function for x * y / z that omits intermediate shifting
     // This has an error of 5000 / 4 10^9 roughly 6 digits of precision
     let power = pow_raw(roottwo, exponent);
     let eps_correction = 1241009291;
-    power = power + ((power * eps_correction * exponent) >> 64);
+    power += ((power * eps_correction * exponent) >> 64);
     // x is fixed point number smaller than 595528/2^32 < 0.00014 so we need only 2 Taylor steps
     // to get the 6 digits of precision
     let taylor1 = (power * x) >> (32 - shift);
@@ -279,12 +279,12 @@ Specialized function for x * y / z that omits intermediate shifting
fun pow_raw(x: u128, n: u128): u128 {
     let res: u256 = 1 << 64;
-    x = x << 32;
+    x <<= 32;
     while (n != 0) {
         if (n & 1 != 0) {
             res = (res * (x as u256)) >> 64;
         };
-        n = n >> 1;
+        n >>= 1;
         x = ((((x as u256) * (x as u256)) >> 64) as u128);
     };
     ((res >> 32) as u128)
diff --git a/aptos-move/framework/aptos-stdlib/doc/math_fixed64.md b/aptos-move/framework/aptos-stdlib/doc/math_fixed64.md
index 1db0e7f736a2e..9dbbfa1108700 100644
--- a/aptos-move/framework/aptos-stdlib/doc/math_fixed64.md
+++ b/aptos-move/framework/aptos-stdlib/doc/math_fixed64.md
@@ -66,7 +66,7 @@ Square root of fixed point number
 
 
 
public fun sqrt(x: FixedPoint64): FixedPoint64 {
-    let y = fixed_point64::get_raw_value(x);
+    let y = x.get_raw_value();
     let z = (math128::sqrt(y) << 32 as u256);
     z = (z + ((y as u256) << 64) / z) >> 1;
     fixed_point64::create_from_raw_value((z as u128))
@@ -94,7 +94,7 @@ Exponent function with a precision of 9 digits.
 
 
 
public fun exp(x: FixedPoint64): FixedPoint64 {
-    let raw_value = (fixed_point64::get_raw_value(x) as u256);
+    let raw_value = (x.get_raw_value() as u256);
     fixed_point64::create_from_raw_value((exp_raw(raw_value) as u128))
 }
 
@@ -121,7 +121,7 @@ is positive for all values of x.
public fun log2_plus_64(x: FixedPoint64): FixedPoint64 {
-    let raw_value = (fixed_point64::get_raw_value(x) as u128);
+    let raw_value = (x.get_raw_value());
     math128::log2_64(raw_value)
 }
 
@@ -146,8 +146,8 @@ is positive for all values of x.
public fun ln_plus_32ln2(x: FixedPoint64): FixedPoint64 {
-    let raw_value = fixed_point64::get_raw_value(x);
-    let x = (fixed_point64::get_raw_value(math128::log2_64(raw_value)) as u256);
+    let raw_value = x.get_raw_value();
+    let x = (math128::log2_64(raw_value).get_raw_value() as u256);
     fixed_point64::create_from_raw_value(((x * LN2) >> 64 as u128))
 }
 
@@ -173,7 +173,7 @@ Integer power of a fixed point number
public fun pow(x: FixedPoint64, n: u64): FixedPoint64 {
-    let raw_value = (fixed_point64::get_raw_value(x) as u256);
+    let raw_value = (x.get_raw_value() as u256);
     fixed_point64::create_from_raw_value((pow_raw(raw_value, (n as u128)) as u128))
 }
 
@@ -199,9 +199,9 @@ Specialized function for x * y / z that omits intermediate shifting
public fun mul_div(x: FixedPoint64, y: FixedPoint64, z: FixedPoint64): FixedPoint64 {
-    let a = fixed_point64::get_raw_value(x);
-    let b = fixed_point64::get_raw_value(y);
-    let c = fixed_point64::get_raw_value(z);
+    let a = x.get_raw_value();
+    let b = y.get_raw_value();
+    let c = z.get_raw_value();
     fixed_point64::create_from_raw_value (math128::mul_div(a, b, c))
 }
 
@@ -241,7 +241,7 @@ Specialized function for x * y / z that omits intermediate shifting
     // 2^(1/580) = roottwo(1 - eps), so the number we seek is roottwo^exponent (1 - eps * exponent)
     let power = pow_raw(roottwo, (exponent as u128));
     let eps_correction = 219071715585908898;
-    power = power - ((power * eps_correction * exponent) >> 128);
+    power -= ((power * eps_correction * exponent) >> 128);
     // x is fixed point number smaller than bigfactor/2^64 < 0.0011 so we need only 5 Taylor steps
     // to get the 15 digits of precision
     let taylor1 = (power * x) >> (64 - shift);
@@ -279,7 +279,7 @@ Specialized function for x * y / z that omits intermediate shifting
         if (n & 1 != 0) {
             res = (res * x) >> 64;
         };
-        n = n >> 1;
+        n >>= 1;
         x = (x * x) >> 64;
     };
     res
diff --git a/aptos-move/framework/aptos-stdlib/doc/multi_ed25519.md b/aptos-move/framework/aptos-stdlib/doc/multi_ed25519.md
index 72f0be430bed1..e850e22a8f143 100644
--- a/aptos-move/framework/aptos-stdlib/doc/multi_ed25519.md
+++ b/aptos-move/framework/aptos-stdlib/doc/multi_ed25519.md
@@ -291,7 +291,7 @@ rejected during signature verification.
public fun new_unvalidated_public_key_from_bytes(bytes: vector<u8>): UnvalidatedPublicKey {
-    let len = vector::length(&bytes);
+    let len = bytes.length();
     let num_sub_pks = len / INDIVIDUAL_PUBLIC_KEY_NUM_BYTES;
 
     assert!(num_sub_pks <= MAX_NUMBER_OF_PUBLIC_KEYS, error::invalid_argument(E_WRONG_PUBKEY_SIZE));
@@ -324,7 +324,7 @@ DEPRECATED: Use new_validated_public_key_from_bytes_v2 instead. See
 
 
public fun new_validated_public_key_from_bytes(bytes: vector<u8>): Option<ValidatedPublicKey> {
     // Note that `public_key_validate_internal` will check that `vector::length(&bytes) / INDIVIDUAL_PUBLIC_KEY_NUM_BYTES <= MAX_NUMBER_OF_PUBLIC_KEYS`.
-    if (vector::length(&bytes) % INDIVIDUAL_PUBLIC_KEY_NUM_BYTES == THRESHOLD_SIZE_BYTES &&
+    if (bytes.length() % INDIVIDUAL_PUBLIC_KEY_NUM_BYTES == THRESHOLD_SIZE_BYTES &&
         public_key_validate_internal(bytes)) {
         option::some(ValidatedPublicKey {
             bytes
@@ -391,7 +391,8 @@ Parses the input bytes as a purported MultiEd25519 multi-signature.
 
 
 
public fun new_signature_from_bytes(bytes: vector<u8>): Signature {
-    assert!(vector::length(&bytes) % INDIVIDUAL_SIGNATURE_NUM_BYTES == BITMAP_NUM_OF_BYTES, error::invalid_argument(E_WRONG_SIGNATURE_SIZE));
+    assert!(
+        bytes.length() % INDIVIDUAL_SIGNATURE_NUM_BYTES == BITMAP_NUM_OF_BYTES, error::invalid_argument(E_WRONG_SIGNATURE_SIZE));
     Signature { bytes }
 }
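As a worked size check (assuming the usual 64-byte Ed25519 sub-signatures and a 4-byte bitmap,
i.e. INDIVIDUAL_SIGNATURE_NUM_BYTES = 64 and BITMAP_NUM_OF_BYTES = 4): a multi-signature carrying
two sub-signatures is 2 * 64 + 4 = 132 bytes, and 132 % 64 == 4, so the assertion passes; a
130-byte input would abort with E_WRONG_SIGNATURE_SIZE.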
 
@@ -688,7 +689,7 @@ when the input pk is known to be valid.
public fun unvalidated_public_key_num_sub_pks(pk: &UnvalidatedPublicKey): u8 {
-    let len = vector::length(&pk.bytes);
+    let len = pk.bytes.length();
 
     ((len / INDIVIDUAL_PUBLIC_KEY_NUM_BYTES) as u8)
 }
@@ -767,7 +768,7 @@ Since the format of this PK has been validated, the returned # of sub-PKs is gua
 
 
 
public fun validated_public_key_num_sub_pks(pk: &ValidatedPublicKey): u8 {
-    let len = vector::length(&pk.bytes);
+    let len = pk.bytes.length();
 
     ((len / INDIVIDUAL_PUBLIC_KEY_NUM_BYTES) as u8)
 }
@@ -794,8 +795,8 @@ Returns the number t of sub-PKs in a validated t-out-of-n MultiEd25519 PK (i.e.,
 
 
 
public fun validated_public_key_threshold(pk: &ValidatedPublicKey): u8 {
-    let len = vector::length(&pk.bytes);
-    let threshold_byte = *vector::borrow(&pk.bytes, len - 1);
+    let len = pk.bytes.length();
+    let threshold_byte = pk.bytes[len - 1];
 
     threshold_byte
 }
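For example (assuming the usual 32-byte Ed25519 sub-keys, i.e. INDIVIDUAL_PUBLIC_KEY_NUM_BYTES = 32):
a validated 2-of-3 public key serializes to 3 * 32 = 96 key bytes followed by the threshold byte
0x02, so len is 97 and the function returns 2.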
@@ -824,14 +825,14 @@ Returns the threshold t <= n of the PK.
 
 
 
public fun check_and_get_threshold(bytes: vector<u8>): Option<u8> {
-    let len = vector::length(&bytes);
+    let len = bytes.length();
     if (len == 0) {
         return option::none<u8>()
     };
 
     let threshold_num_of_bytes = len % INDIVIDUAL_PUBLIC_KEY_NUM_BYTES;
     let num_of_keys = len / INDIVIDUAL_PUBLIC_KEY_NUM_BYTES;
-    let threshold_byte = *vector::borrow(&bytes, len - 1);
+    let threshold_byte = bytes[len - 1];
 
     if (num_of_keys == 0 || num_of_keys > MAX_NUMBER_OF_PUBLIC_KEYS || threshold_num_of_bytes != 1) {
         return option::none<u8>()
@@ -864,7 +865,7 @@ Derives the Aptos-specific authentication key of the given Ed25519 public key.
 
 
 
fun public_key_bytes_to_authentication_key(pk_bytes: vector<u8>): vector<u8> {
-    vector::push_back(&mut pk_bytes, SIGNATURE_SCHEME_ID);
+    pk_bytes.push_back(SIGNATURE_SCHEME_ID);
     std::hash::sha3_256(pk_bytes)
 }
 
diff --git a/aptos-move/framework/aptos-stdlib/doc/multi_key.md b/aptos-move/framework/aptos-stdlib/doc/multi_key.md new file mode 100644 index 0000000000000..a2db79301f978 --- /dev/null +++ b/aptos-move/framework/aptos-stdlib/doc/multi_key.md @@ -0,0 +1,262 @@ + + + +# Module `0x1::multi_key` + +This module implements MultiKey type of public key. +A MultiKey public key is a collection of single key public keys and a number representing the number of signatures required to authenticate a transaction. +Unlike MultiEd25519, the individual single keys can be of different schemes. + + +- [Struct `MultiKey`](#0x1_multi_key_MultiKey) +- [Constants](#@Constants_0) +- [Function `new_public_key_from_bytes`](#0x1_multi_key_new_public_key_from_bytes) +- [Function `new_multi_key_from_single_keys`](#0x1_multi_key_new_multi_key_from_single_keys) +- [Function `deserialize_multi_key`](#0x1_multi_key_deserialize_multi_key) +- [Function `to_authentication_key`](#0x1_multi_key_to_authentication_key) +- [Specification](#@Specification_1) + + +
use 0x1::bcs;
+use 0x1::bcs_stream;
+use 0x1::error;
+use 0x1::hash;
+use 0x1::single_key;
+
+ + + + + +## Struct `MultiKey` + +An *unvalidated*, k out of n MultiKey public key. The bytes field contains (1) a vector of single key public keys and +(2) a single byte encoding the threshold k. +*Unvalidated* means there is no guarantee that the underlying PKs are valid elliptic curve points of non-small +order. Nor is there a guarantee that it would deserialize correctly (i.e., for Keyless public keys). + + +
struct MultiKey has copy, drop, store
+
+ + + +
+Fields + + +
+
+public_keys: vector<single_key::AnyPublicKey> +
+
+ +
+
+signatures_required: u8 +
+
+ +
+
+ + +
+ + + +## Constants + + + + +The identifier of the MultiEd25519 signature scheme, which is used when deriving Aptos authentication keys by hashing +it together with an MultiEd25519 public key. + + +
const SIGNATURE_SCHEME_ID: u8 = 3;
+
+ + + + + +Max number of ed25519 public keys allowed in multi-ed25519 keys + + +
const MAX_NUMBER_OF_PUBLIC_KEYS: u64 = 32;
+
+ + + + + +There are extra bytes in the input when deserializing a MultiKey public key. + + +
const E_INVALID_MULTI_KEY_EXTRA_BYTES: u64 = 4;
+
+ + + + + +No keys were provided when creating a MultiKey public key. + + +
const E_INVALID_MULTI_KEY_NO_KEYS: u64 = 1;
+
+ + + + + +The number of signatures required is greater than the number of keys provided. + + +
const E_INVALID_MULTI_KEY_SIGNATURES_REQUIRED: u64 = 3;
+
+ + + + + +The number of keys provided is greater than the maximum allowed. + + +
const E_INVALID_MULTI_KEY_TOO_MANY_KEYS: u64 = 2;
+
+ + + + + +## Function `new_public_key_from_bytes` + +Parses the input bytes into a MultiKey public key. + + +
public fun new_public_key_from_bytes(bytes: vector<u8>): multi_key::MultiKey
+
+ + + +
+Implementation + + +
public fun new_public_key_from_bytes(bytes: vector<u8>): MultiKey {
+    let stream = bcs_stream::new(bytes);
+    let pk = deserialize_multi_key(&mut stream);
+    assert!(!bcs_stream::has_remaining(&mut stream), error::invalid_argument(E_INVALID_MULTI_KEY_EXTRA_BYTES));
+    pk
+}
+
+ + + +
+ + + +## Function `new_multi_key_from_single_keys` + +Creates a new MultiKey public key from a vector of single key public keys and a number representing the number of signatures required to authenticate a transaction. + + +
public fun new_multi_key_from_single_keys(single_keys: vector<single_key::AnyPublicKey>, signatures_required: u8): multi_key::MultiKey
+
+ + + +
+Implementation + + +
public fun new_multi_key_from_single_keys(single_keys: vector<single_key::AnyPublicKey>, signatures_required: u8): MultiKey {
+    let num_keys = single_keys.length();
+    assert!(
+        num_keys > 0,
+        error::invalid_argument(E_INVALID_MULTI_KEY_NO_KEYS)
+    );
+    assert!(
+        num_keys <= MAX_NUMBER_OF_PUBLIC_KEYS,
+        error::invalid_argument(E_INVALID_MULTI_KEY_TOO_MANY_KEYS)
+    );
+    assert!(
+        (signatures_required as u64) <= num_keys,
+        error::invalid_argument(E_INVALID_MULTI_KEY_SIGNATURES_REQUIRED)
+    );
+    MultiKey { public_keys: single_keys, signatures_required }
+}
+
+ + + +
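+A hedged usage sketch: assuming pks already holds single_key::AnyPublicKey values
+obtained elsewhere (their construction is out of scope here), this builds a 2-of-n key:
+
+fun two_of_n(pks: vector<single_key::AnyPublicKey>): MultiKey {
+    // Aborts with one of the E_INVALID_MULTI_KEY_* codes if pks is empty, has more than
+    // MAX_NUMBER_OF_PUBLIC_KEYS entries, or has fewer entries than the threshold of 2.
+    new_multi_key_from_single_keys(pks, 2)
+}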
+ + + +## Function `deserialize_multi_key` + +Deserializes a MultiKey public key from a BCS stream. + + +
public fun deserialize_multi_key(stream: &mut bcs_stream::BCSStream): multi_key::MultiKey
+
+ + + +
+Implementation + + +
public fun deserialize_multi_key(stream: &mut bcs_stream::BCSStream): MultiKey {
+    let public_keys = bcs_stream::deserialize_vector(stream, |x| single_key::deserialize_any_public_key(x));
+    let signatures_required = bcs_stream::deserialize_u8(stream);
+    MultiKey { public_keys, signatures_required }
+}
+
+ + + +
+ + + +## Function `to_authentication_key` + +Returns the authentication key for a MultiKey public key. + + +
public fun to_authentication_key(self: &multi_key::MultiKey): vector<u8>
+
+ + + +
+Implementation + + +
public fun to_authentication_key(self: &MultiKey): vector<u8> {
+    let pk_bytes = bcs::to_bytes(self);
+    pk_bytes.push_back(SIGNATURE_SCHEME_ID);
+    hash::sha3_256(pk_bytes)
+}
+
+ + + +
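+A minimal end-to-end sketch (assuming bytes holds a BCS-serialized MultiKey public key):
+parse the key, then derive its 32-byte authentication key.
+
+fun auth_key_from_bytes(bytes: vector<u8>): vector<u8> {
+    let pk = new_public_key_from_bytes(bytes);
+    // sha3-256 over bcs(pk) followed by SIGNATURE_SCHEME_ID, as implemented above.
+    pk.to_authentication_key()
+}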
+ + + +## Specification + + + +
pragma verify = false;
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/aptos-stdlib/doc/overview.md b/aptos-move/framework/aptos-stdlib/doc/overview.md index 6176385db1d97..a19d30c67cd5e 100644 --- a/aptos-move/framework/aptos-stdlib/doc/overview.md +++ b/aptos-move/framework/aptos-stdlib/doc/overview.md @@ -14,6 +14,7 @@ This is the reference documentation of the Aptos standard library. - [`0x1::any`](any.md#0x1_any) - [`0x1::aptos_hash`](hash.md#0x1_aptos_hash) +- [`0x1::bcs_stream`](bcs_stream.md#0x1_bcs_stream) - [`0x1::big_vector`](big_vector.md#0x1_big_vector) - [`0x1::bls12381`](bls12381.md#0x1_bls12381) - [`0x1::bls12381_algebra`](bls12381_algebra.md#0x1_bls12381_algebra) @@ -24,13 +25,16 @@ This is the reference documentation of the Aptos standard library. - [`0x1::crypto_algebra`](crypto_algebra.md#0x1_crypto_algebra) - [`0x1::debug`](debug.md#0x1_debug) - [`0x1::ed25519`](ed25519.md#0x1_ed25519) +- [`0x1::federated_keyless`](federated_keyless.md#0x1_federated_keyless) - [`0x1::fixed_point64`](fixed_point64.md#0x1_fixed_point64) - [`0x1::from_bcs`](from_bcs.md#0x1_from_bcs) +- [`0x1::keyless`](keyless.md#0x1_keyless) - [`0x1::math128`](math128.md#0x1_math128) - [`0x1::math64`](math64.md#0x1_math64) - [`0x1::math_fixed`](math_fixed.md#0x1_math_fixed) - [`0x1::math_fixed64`](math_fixed64.md#0x1_math_fixed64) - [`0x1::multi_ed25519`](multi_ed25519.md#0x1_multi_ed25519) +- [`0x1::multi_key`](multi_key.md#0x1_multi_key) - [`0x1::pool_u64`](pool_u64.md#0x1_pool_u64) - [`0x1::pool_u64_unbound`](pool_u64_unbound.md#0x1_pool_u64_unbound) - [`0x1::ristretto255`](ristretto255.md#0x1_ristretto255) @@ -38,9 +42,12 @@ This is the reference documentation of the Aptos standard library. - [`0x1::ristretto255_elgamal`](ristretto255_elgamal.md#0x1_ristretto255_elgamal) - [`0x1::ristretto255_pedersen`](ristretto255_pedersen.md#0x1_ristretto255_pedersen) - [`0x1::secp256k1`](secp256k1.md#0x1_secp256k1) +- [`0x1::secp256r1`](secp256r1.md#0x1_secp256r1) - [`0x1::simple_map`](simple_map.md#0x1_simple_map) +- [`0x1::single_key`](single_key.md#0x1_single_key) - [`0x1::smart_table`](smart_table.md#0x1_smart_table) - [`0x1::smart_vector`](smart_vector.md#0x1_smart_vector) +- [`0x1::storage_slots_allocator`](storage_slots_allocator.md#0x1_storage_slots_allocator) - [`0x1::string_utils`](string_utils.md#0x1_string_utils) - [`0x1::table`](table.md#0x1_table) - [`0x1::table_with_length`](table_with_length.md#0x1_table_with_length) diff --git a/aptos-move/framework/aptos-stdlib/doc/pool_u64.md b/aptos-move/framework/aptos-stdlib/doc/pool_u64.md index a9e3d17c22eb1..74cd90fe89970 100644 --- a/aptos-move/framework/aptos-stdlib/doc/pool_u64.md +++ b/aptos-move/framework/aptos-stdlib/doc/pool_u64.md @@ -43,7 +43,6 @@ shares left. - [Function `shares_to_amount`](#0x1_pool_u64_shares_to_amount) - [Function `shares_to_amount_with_total_coins`](#0x1_pool_u64_shares_to_amount_with_total_coins) - [Function `multiply_then_divide`](#0x1_pool_u64_multiply_then_divide) -- [Function `to_u128`](#0x1_pool_u64_to_u128) - [Specification](#@Specification_1) - [Struct `Pool`](#@Specification_1_Pool) - [Function `contains`](#@Specification_1_contains) @@ -299,7 +298,7 @@ Create a new pool with custom scaling_factor. Destroy an empty pool. This will fail if the pool has any balance of coins. -
public fun destroy_empty(pool: pool_u64::Pool)
+
public fun destroy_empty(self: pool_u64::Pool)
 
@@ -308,8 +307,8 @@ Destroy an empty pool. This will fail if the pool has any balance of coins. Implementation -
public fun destroy_empty(pool: Pool) {
-    assert!(pool.total_coins == 0, error::invalid_state(EPOOL_IS_NOT_EMPTY));
+
public fun destroy_empty(self: Pool) {
+    assert!(self.total_coins == 0, error::invalid_state(EPOOL_IS_NOT_EMPTY));
     let Pool {
         shareholders_limit: _,
         total_coins: _,
@@ -317,7 +316,7 @@ Destroy an empty pool. This will fail if the pool has any balance of coins.
         shares: _,
         shareholders: _,
         scaling_factor: _,
-    } = pool;
+    } = self;
 }
 
@@ -329,10 +328,10 @@ Destroy an empty pool. This will fail if the pool has any balance of coins. ## Function `total_coins` -Return pool's total balance of coins. +Return self's total balance of coins. -
public fun total_coins(pool: &pool_u64::Pool): u64
+
public fun total_coins(self: &pool_u64::Pool): u64
 
@@ -341,8 +340,8 @@ Return pool's total balance of coins. Implementation -
public fun total_coins(pool: &Pool): u64 {
-    pool.total_coins
+
public fun total_coins(self: &Pool): u64 {
+    self.total_coins
 }
 
@@ -354,10 +353,10 @@ Return pool's total balance of coins. ## Function `total_shares` -Return the total number of shares across all shareholders in pool. +Return the total number of shares across all shareholders in self. -
public fun total_shares(pool: &pool_u64::Pool): u64
+
public fun total_shares(self: &pool_u64::Pool): u64
 
@@ -366,8 +365,8 @@ Return the total number of shares across all shareholders in pool. Implementation -
public fun total_shares(pool: &Pool): u64 {
-    pool.total_shares
+
public fun total_shares(self: &Pool): u64 {
+    self.total_shares
 }
 
@@ -379,10 +378,10 @@ Return the total number of shares across all shareholders in pool. ## Function `contains` -Return true if shareholder is in pool. +Return true if shareholder is in self. -
public fun contains(pool: &pool_u64::Pool, shareholder: address): bool
+
public fun contains(self: &pool_u64::Pool, shareholder: address): bool
 
@@ -391,8 +390,8 @@ Return true if shareholder is in pool. Implementation -
public fun contains(pool: &Pool, shareholder: address): bool {
-    simple_map::contains_key(&pool.shares, &shareholder)
+
public fun contains(self: &Pool, shareholder: address): bool {
+    self.shares.contains_key(&shareholder)
 }
 
@@ -404,10 +403,10 @@ Return true if shareholder is in pool. ## Function `shares` -Return the number of shares of stakeholder in pool. +Return the number of shares of stakeholder in self. -
public fun shares(pool: &pool_u64::Pool, shareholder: address): u64
+
public fun shares(self: &pool_u64::Pool, shareholder: address): u64
 
@@ -416,9 +415,9 @@ Return the number of shares of stakeholder in pool. Implementation -
public fun shares(pool: &Pool, shareholder: address): u64 {
-    if (contains(pool, shareholder)) {
-        *simple_map::borrow(&pool.shares, &shareholder)
+
public fun shares(self: &Pool, shareholder: address): u64 {
+    if (self.contains(shareholder)) {
+        *self.shares.borrow(&shareholder)
     } else {
         0
     }
@@ -433,10 +432,10 @@ Return the number of shares of stakeholder in pool.
 
 ## Function `balance`
 
-Return the balance in coins of shareholder in pool.
+Return the balance in coins of shareholder in self.
 
 
-
public fun balance(pool: &pool_u64::Pool, shareholder: address): u64
+
public fun balance(self: &pool_u64::Pool, shareholder: address): u64
 
@@ -445,9 +444,9 @@ Return the balance in coins of shareholder in pool. Implementation -
public fun balance(pool: &Pool, shareholder: address): u64 {
-    let num_shares = shares(pool, shareholder);
-    shares_to_amount(pool, num_shares)
+
public fun balance(self: &Pool, shareholder: address): u64 {
+    let num_shares = self.shares(shareholder);
+    self.shares_to_amount(num_shares)
 }
 
@@ -459,10 +458,10 @@ Return the balance in coins of shareholder in pool. ## Function `shareholders` -Return the list of shareholders in pool. +Return the list of shareholders in self. -
public fun shareholders(pool: &pool_u64::Pool): vector<address>
+
public fun shareholders(self: &pool_u64::Pool): vector<address>
 
@@ -471,8 +470,8 @@ Return the list of shareholders in pool. Implementation -
public fun shareholders(pool: &Pool): vector<address> {
-    pool.shareholders
+
public fun shareholders(self: &Pool): vector<address> {
+    self.shareholders
 }
 
@@ -484,10 +483,10 @@ Return the list of shareholders in pool. ## Function `shareholders_count` -Return the number of shareholders in pool. +Return the number of shareholders in self. -
public fun shareholders_count(pool: &pool_u64::Pool): u64
+
public fun shareholders_count(self: &pool_u64::Pool): u64
 
@@ -496,8 +495,8 @@ Return the number of shareholders in pool. Implementation -
public fun shareholders_count(pool: &Pool): u64 {
-    vector::length(&pool.shareholders)
+
public fun shareholders_count(self: &Pool): u64 {
+    self.shareholders.length()
 }
 
@@ -509,10 +508,10 @@ Return the number of shareholders in pool. ## Function `update_total_coins` -Update pool's total balance of coins. +Update self's total balance of coins. -
public fun update_total_coins(pool: &mut pool_u64::Pool, new_total_coins: u64)
+
public fun update_total_coins(self: &mut pool_u64::Pool, new_total_coins: u64)
 
@@ -521,8 +520,8 @@ Update pool's total balance of coins. Implementation -
public fun update_total_coins(pool: &mut Pool, new_total_coins: u64) {
-    pool.total_coins = new_total_coins;
+
public fun update_total_coins(self: &mut Pool, new_total_coins: u64) {
+    self.total_coins = new_total_coins;
 }
 
@@ -537,7 +536,7 @@ Update pool's total balance of coins. Allow an existing or new shareholder to add their coins to the pool in exchange for new shares. -
public fun buy_in(pool: &mut pool_u64::Pool, shareholder: address, coins_amount: u64): u64
+
public fun buy_in(self: &mut pool_u64::Pool, shareholder: address, coins_amount: u64): u64
 
@@ -546,16 +545,16 @@ Allow an existing or new shareholder to add their coins to the pool in exchange Implementation -
public fun buy_in(pool: &mut Pool, shareholder: address, coins_amount: u64): u64 {
+
public fun buy_in(self: &mut Pool, shareholder: address, coins_amount: u64): u64 {
     if (coins_amount == 0) return 0;
 
-    let new_shares = amount_to_shares(pool, coins_amount);
-    assert!(MAX_U64 - pool.total_coins >= coins_amount, error::invalid_argument(EPOOL_TOTAL_COINS_OVERFLOW));
-    assert!(MAX_U64 - pool.total_shares >= new_shares, error::invalid_argument(EPOOL_TOTAL_COINS_OVERFLOW));
+    let new_shares = self.amount_to_shares(coins_amount);
+    assert!(MAX_U64 - self.total_coins >= coins_amount, error::invalid_argument(EPOOL_TOTAL_COINS_OVERFLOW));
+    assert!(MAX_U64 - self.total_shares >= new_shares, error::invalid_argument(EPOOL_TOTAL_COINS_OVERFLOW));
 
-    pool.total_coins = pool.total_coins + coins_amount;
-    pool.total_shares = pool.total_shares + new_shares;
-    add_shares(pool, shareholder, new_shares);
+    self.total_coins += coins_amount;
+    self.total_shares += new_shares;
+    self.add_shares(shareholder, new_shares);
     new_shares
 }
 
@@ -568,11 +567,11 @@ Allow an existing or new shareholder to add their coins to the pool in exchange ## Function `add_shares` -Add the number of shares directly for shareholder in pool. +Add the number of shares directly for shareholder in self. This would dilute other shareholders if the pool's balance of coins didn't change. -
fun add_shares(pool: &mut pool_u64::Pool, shareholder: address, new_shares: u64): u64
+
fun add_shares(self: &mut pool_u64::Pool, shareholder: address, new_shares: u64): u64
 
@@ -581,9 +580,9 @@ This would dilute other shareholders if the pool's balance of coins didn't chang Implementation -
fun add_shares(pool: &mut Pool, shareholder: address, new_shares: u64): u64 {
-    if (contains(pool, shareholder)) {
-        let existing_shares = simple_map::borrow_mut(&mut pool.shares, &shareholder);
+
fun add_shares(self: &mut Pool, shareholder: address, new_shares: u64): u64 {
+    if (self.contains(shareholder)) {
+        let existing_shares = self.shares.borrow_mut(&shareholder);
         let current_shares = *existing_shares;
         assert!(MAX_U64 - current_shares >= new_shares, error::invalid_argument(ESHAREHOLDER_SHARES_OVERFLOW));
 
@@ -591,12 +590,12 @@ This would dilute other shareholders if the pool's balance of coins didn't chang
         *existing_shares
     } else if (new_shares > 0) {
         assert!(
-            vector::length(&pool.shareholders) < pool.shareholders_limit,
+            self.shareholders.length() < self.shareholders_limit,
             error::invalid_state(ETOO_MANY_SHAREHOLDERS),
         );
 
-        vector::push_back(&mut pool.shareholders, shareholder);
-        simple_map::add(&mut pool.shares, shareholder, new_shares);
+        self.shareholders.push_back(shareholder);
+        self.shares.add(shareholder, new_shares);
         new_shares
     } else {
         new_shares
@@ -612,10 +611,10 @@ This would dilute other shareholders if the pool's balance of coins didn't chang
 
 ## Function `redeem_shares`
 
-Allow shareholder to redeem their shares in pool for coins.
+Allow shareholder to redeem their shares in self for coins.
 
 
-
public fun redeem_shares(pool: &mut pool_u64::Pool, shareholder: address, shares_to_redeem: u64): u64
+
public fun redeem_shares(self: &mut pool_u64::Pool, shareholder: address, shares_to_redeem: u64): u64
 
@@ -624,16 +623,16 @@ Allow shareholder to redeem their shares in pool for c Implementation -
public fun redeem_shares(pool: &mut Pool, shareholder: address, shares_to_redeem: u64): u64 {
-    assert!(contains(pool, shareholder), error::invalid_argument(ESHAREHOLDER_NOT_FOUND));
-    assert!(shares(pool, shareholder) >= shares_to_redeem, error::invalid_argument(EINSUFFICIENT_SHARES));
+
public fun redeem_shares(self: &mut Pool, shareholder: address, shares_to_redeem: u64): u64 {
+    assert!(self.contains(shareholder), error::invalid_argument(ESHAREHOLDER_NOT_FOUND));
+    assert!(self.shares(shareholder) >= shares_to_redeem, error::invalid_argument(EINSUFFICIENT_SHARES));
 
     if (shares_to_redeem == 0) return 0;
 
-    let redeemed_coins = shares_to_amount(pool, shares_to_redeem);
-    pool.total_coins = pool.total_coins - redeemed_coins;
-    pool.total_shares = pool.total_shares - shares_to_redeem;
-    deduct_shares(pool, shareholder, shares_to_redeem);
+    let redeemed_coins = self.shares_to_amount(shares_to_redeem);
+    self.total_coins -= redeemed_coins;
+    self.total_shares -= shares_to_redeem;
+    self.deduct_shares(shareholder, shares_to_redeem);
 
     redeemed_coins
 }
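A hedged sketch of the buy-in / redeem round trip on an existing pool (the constructor is not
shown in this hunk, so the pool is taken as a parameter; the amounts are illustrative):

fun buy_then_redeem(pool: &mut Pool, shareholder: address): u64 {
    // Deposit 1000 coins; the number of shares received depends on the current share price.
    let shares = pool.buy_in(shareholder, 1000);
    // Redeeming those shares immediately returns at most the 1000 coins, minus rounding.
    let coins = pool.redeem_shares(shareholder, shares);
    assert!(coins <= 1000, 0);
    coins
}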
@@ -650,7 +649,7 @@ Allow shareholder to redeem their shares in pool for c
 Transfer shares from shareholder_1 to shareholder_2.
 
 
-
public fun transfer_shares(pool: &mut pool_u64::Pool, shareholder_1: address, shareholder_2: address, shares_to_transfer: u64)
+
public fun transfer_shares(self: &mut pool_u64::Pool, shareholder_1: address, shareholder_2: address, shares_to_transfer: u64)
 
@@ -660,17 +659,17 @@ Transfer shares from shareholder_1 to shareholder_2.
public fun transfer_shares(
-    pool: &mut Pool,
+    self: &mut Pool,
     shareholder_1: address,
     shareholder_2: address,
     shares_to_transfer: u64,
 ) {
-    assert!(contains(pool, shareholder_1), error::invalid_argument(ESHAREHOLDER_NOT_FOUND));
-    assert!(shares(pool, shareholder_1) >= shares_to_transfer, error::invalid_argument(EINSUFFICIENT_SHARES));
+    assert!(self.contains(shareholder_1), error::invalid_argument(ESHAREHOLDER_NOT_FOUND));
+    assert!(self.shares(shareholder_1) >= shares_to_transfer, error::invalid_argument(EINSUFFICIENT_SHARES));
     if (shares_to_transfer == 0) return;
 
-    deduct_shares(pool, shareholder_1, shares_to_transfer);
-    add_shares(pool, shareholder_2, shares_to_transfer);
+    self.deduct_shares(shareholder_1, shares_to_transfer);
+    self.add_shares(shareholder_2, shares_to_transfer);
 }
 
@@ -682,10 +681,10 @@ Transfer shares from shareholder_1 to shareholder_2. ## Function `deduct_shares` -Directly deduct shareholder's number of shares in pool and return the number of remaining shares. +Directly deduct shareholder's number of shares in self and return the number of remaining shares. -
fun deduct_shares(pool: &mut pool_u64::Pool, shareholder: address, num_shares: u64): u64
+
fun deduct_shares(self: &mut pool_u64::Pool, shareholder: address, num_shares: u64): u64
 
@@ -694,19 +693,19 @@ Directly deduct shareholder's number of shares in pool Implementation -
fun deduct_shares(pool: &mut Pool, shareholder: address, num_shares: u64): u64 {
-    assert!(contains(pool, shareholder), error::invalid_argument(ESHAREHOLDER_NOT_FOUND));
-    assert!(shares(pool, shareholder) >= num_shares, error::invalid_argument(EINSUFFICIENT_SHARES));
+
fun deduct_shares(self: &mut Pool, shareholder: address, num_shares: u64): u64 {
+    assert!(self.contains(shareholder), error::invalid_argument(ESHAREHOLDER_NOT_FOUND));
+    assert!(self.shares(shareholder) >= num_shares, error::invalid_argument(EINSUFFICIENT_SHARES));
 
-    let existing_shares = simple_map::borrow_mut(&mut pool.shares, &shareholder);
-    *existing_shares = *existing_shares - num_shares;
+    let existing_shares = self.shares.borrow_mut(&shareholder);
+    *existing_shares -= num_shares;
 
     // Remove the shareholder completely if they have no shares left.
     let remaining_shares = *existing_shares;
     if (remaining_shares == 0) {
-        let (_, shareholder_index) = vector::index_of(&pool.shareholders, &shareholder);
-        vector::remove(&mut pool.shareholders, shareholder_index);
-        simple_map::remove(&mut pool.shares, &shareholder);
+        let (_, shareholder_index) = self.shareholders.index_of(&shareholder);
+        self.shareholders.remove(shareholder_index);
+        self.shares.remove(&shareholder);
     };
 
     remaining_shares
@@ -721,11 +720,11 @@ Directly deduct shareholder's number of shares in pool
 
 ## Function `amount_to_shares`
 
-Return the number of new shares coins_amount can buy in pool.
+Return the number of new shares coins_amount can buy in self.
 amount needs to be big enough to avoid rounding errors.
 
 
-
public fun amount_to_shares(pool: &pool_u64::Pool, coins_amount: u64): u64
+
public fun amount_to_shares(self: &pool_u64::Pool, coins_amount: u64): u64
 
@@ -734,8 +733,8 @@ Return the number of new shares coins_amount can buy in pool< Implementation -
public fun amount_to_shares(pool: &Pool, coins_amount: u64): u64 {
-    amount_to_shares_with_total_coins(pool, coins_amount, pool.total_coins)
+
public fun amount_to_shares(self: &Pool, coins_amount: u64): u64 {
+    self.amount_to_shares_with_total_coins(coins_amount, self.total_coins)
 }
 
@@ -747,11 +746,11 @@ Return the number of new shares coins_amount can buy in pool< ## Function `amount_to_shares_with_total_coins` -Return the number of new shares coins_amount can buy in pool with a custom total coins number. +Return the number of new shares coins_amount can buy in self with a custom total coins number. amount needs to big enough to avoid rounding number. -
public fun amount_to_shares_with_total_coins(pool: &pool_u64::Pool, coins_amount: u64, total_coins: u64): u64
+
public fun amount_to_shares_with_total_coins(self: &pool_u64::Pool, coins_amount: u64, total_coins: u64): u64
 
@@ -760,17 +759,17 @@ Return the number of new shares coins_amount can buy in pool< Implementation -
public fun amount_to_shares_with_total_coins(pool: &Pool, coins_amount: u64, total_coins: u64): u64 {
+
public fun amount_to_shares_with_total_coins(self: &Pool, coins_amount: u64, total_coins: u64): u64 {
     // No shares yet so amount is worth the same number of shares.
-    if (pool.total_coins == 0 || pool.total_shares == 0) {
+    if (self.total_coins == 0 || self.total_shares == 0) {
         // Multiply by scaling factor to minimize rounding errors during internal calculations for buy ins/redeems.
         // This can overflow but scaling factor is expected to be chosen carefully so this would not overflow.
-        coins_amount * pool.scaling_factor
+        coins_amount * self.scaling_factor
     } else {
         // Shares price = total_coins / total existing shares.
         // New number of shares = new_amount / shares_price = new_amount * existing_shares / total_amount.
         // We rearrange the calc and do multiplication first to avoid rounding errors.
-        multiply_then_divide(pool, coins_amount, pool.total_shares, total_coins)
+        self.multiply_then_divide(coins_amount, self.total_shares, total_coins)
     }
 }
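As a worked example of the second branch (illustrative numbers): with total_coins = 1000,
total_shares = 2000 and coins_amount = 100, the share price is 0.5 coins per share, so

    new_shares = coins_amount * total_shares / total_coins = 100 * 2000 / 1000 = 200.

Multiplying before dividing keeps the integer division from discarding precision for small
buy-ins.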
 
@@ -783,11 +782,11 @@ Return the number of new shares coins_amount can buy in pool< ## Function `shares_to_amount` -Return the number of coins shares are worth in pool. +Return the number of coins shares are worth in self. shares needs to big enough to avoid rounding number. -
public fun shares_to_amount(pool: &pool_u64::Pool, shares: u64): u64
+
public fun shares_to_amount(self: &pool_u64::Pool, shares: u64): u64
 
@@ -796,8 +795,8 @@ Return the number of coins shares are worth in pool. Implementation -
public fun shares_to_amount(pool: &Pool, shares: u64): u64 {
-    shares_to_amount_with_total_coins(pool, shares, pool.total_coins)
+
public fun shares_to_amount(self: &Pool, shares: u64): u64 {
+    self.shares_to_amount_with_total_coins(shares, self.total_coins)
 }
 
@@ -809,11 +808,11 @@ Return the number of coins shares are worth in pool. ## Function `shares_to_amount_with_total_coins` -Return the number of coins shares are worth in pool with a custom total coins number. +Return the number of coins shares are worth in self with a custom total coins number. shares needs to big enough to avoid rounding number. -
public fun shares_to_amount_with_total_coins(pool: &pool_u64::Pool, shares: u64, total_coins: u64): u64
+
public fun shares_to_amount_with_total_coins(self: &pool_u64::Pool, shares: u64, total_coins: u64): u64
 
@@ -822,15 +821,15 @@ Return the number of coins shares are worth in pool wi Implementation -
public fun shares_to_amount_with_total_coins(pool: &Pool, shares: u64, total_coins: u64): u64 {
+
public fun shares_to_amount_with_total_coins(self: &Pool, shares: u64, total_coins: u64): u64 {
     // No shares or coins yet so shares are worthless.
-    if (pool.total_coins == 0 || pool.total_shares == 0) {
+    if (self.total_coins == 0 || self.total_shares == 0) {
         0
     } else {
         // Shares price = total_coins / total existing shares.
         // Shares worth = shares * shares price = shares * total_coins / total existing shares.
         // We rearrange the calc and do multiplication first to avoid rounding errors.
-        multiply_then_divide(pool, shares, total_coins, pool.total_shares)
+        self.multiply_then_divide(shares, total_coins, self.total_shares)
     }
 }
 
@@ -845,7 +844,7 @@ Return the number of coins shares are worth in pool wi -
public fun multiply_then_divide(_pool: &pool_u64::Pool, x: u64, y: u64, z: u64): u64
+
public fun multiply_then_divide(self: &pool_u64::Pool, x: u64, y: u64, z: u64): u64
 
@@ -854,33 +853,8 @@ Return the number of coins shares are worth in pool wi Implementation -
public fun multiply_then_divide(_pool: &Pool, x: u64, y: u64, z: u64): u64 {
-    let result = (to_u128(x) * to_u128(y)) / to_u128(z);
-    (result as u64)
-}
-
- - - - - - - -## Function `to_u128` - - - -
fun to_u128(num: u64): u128
-
- - - -
-Implementation - - -
fun to_u128(num: u64): u128 {
-    (num as u128)
+
public fun multiply_then_divide(self: &Pool, x: u64, y: u64, z: u64): u64 {
+    math64::mul_div(x, y, z)
 }
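This keeps the behaviour of the removed to_u128-based implementation: math64::mul_div performs the
x * y product in a wider integer type before dividing by z, so the intermediate product may exceed
u64 as long as the final quotient fits. For example (illustrative numbers),
multiply_then_divide(&pool, 1 << 40, 1 << 40, 1 << 50) returns 1 << 30 even though
(1 << 40) * (1 << 40) overflows u64.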
 
@@ -974,14 +948,14 @@ Return the number of coins shares are worth in pool wi ### Function `contains` -
public fun contains(pool: &pool_u64::Pool, shareholder: address): bool
+
public fun contains(self: &pool_u64::Pool, shareholder: address): bool
 
aborts_if false;
-ensures result == spec_contains(pool, shareholder);
+ensures result == spec_contains(self, shareholder);
 
@@ -1007,14 +981,14 @@ Return the number of coins shares are worth in pool wi ### Function `shares` -
public fun shares(pool: &pool_u64::Pool, shareholder: address): u64
+
public fun shares(self: &pool_u64::Pool, shareholder: address): u64
 
aborts_if false;
-ensures result == spec_shares(pool, shareholder);
+ensures result == spec_shares(self, shareholder);
 
@@ -1024,16 +998,16 @@ Return the number of coins shares are worth in pool wi ### Function `balance` -
public fun balance(pool: &pool_u64::Pool, shareholder: address): u64
+
public fun balance(self: &pool_u64::Pool, shareholder: address): u64
 
-
let shares = spec_shares(pool, shareholder);
-let total_coins = pool.total_coins;
-aborts_if pool.total_coins > 0 && pool.total_shares > 0 && (shares * total_coins) / pool.total_shares > MAX_U64;
-ensures result == spec_shares_to_amount_with_total_coins(pool, shares, total_coins);
+
let shares = spec_shares(self, shareholder);
+let total_coins = self.total_coins;
+aborts_if self.total_coins > 0 && self.total_shares > 0 && (shares * total_coins) / self.total_shares > MAX_U64;
+ensures result == spec_shares_to_amount_with_total_coins(self, shares, total_coins);
 
@@ -1043,19 +1017,19 @@ Return the number of coins shares are worth in pool wi ### Function `buy_in` -
public fun buy_in(pool: &mut pool_u64::Pool, shareholder: address, coins_amount: u64): u64
+
public fun buy_in(self: &mut pool_u64::Pool, shareholder: address, coins_amount: u64): u64
 
-
let new_shares = spec_amount_to_shares_with_total_coins(pool, coins_amount, pool.total_coins);
-aborts_if pool.total_coins + coins_amount > MAX_U64;
-aborts_if pool.total_shares + new_shares > MAX_U64;
-include coins_amount > 0 ==> AddSharesAbortsIf { new_shares: new_shares };
-include coins_amount > 0 ==> AddSharesEnsures { new_shares: new_shares };
-ensures pool.total_coins == old(pool.total_coins) + coins_amount;
-ensures pool.total_shares == old(pool.total_shares) + new_shares;
+
let new_shares = spec_amount_to_shares_with_total_coins(self, coins_amount, self.total_coins);
+aborts_if self.total_coins + coins_amount > MAX_U64;
+aborts_if self.total_shares + new_shares > MAX_U64;
+include coins_amount > 0 ==> AddSharesAbortsIf { new_shares };
+include coins_amount > 0 ==> AddSharesEnsures { new_shares };
+ensures self.total_coins == old(self.total_coins) + coins_amount;
+ensures self.total_shares == old(self.total_shares) + new_shares;
 ensures result == new_shares;
 
@@ -1066,7 +1040,7 @@ Return the number of coins shares are worth in pool wi ### Function `add_shares` -
fun add_shares(pool: &mut pool_u64::Pool, shareholder: address, new_shares: u64): u64
+
fun add_shares(self: &mut pool_u64::Pool, shareholder: address, new_shares: u64): u64
 
@@ -1074,8 +1048,8 @@ Return the number of coins shares are worth in pool wi
include AddSharesAbortsIf;
 include AddSharesEnsures;
-let key_exists = simple_map::spec_contains_key(pool.shares, shareholder);
-ensures result == if (key_exists) { simple_map::spec_get(pool.shares, shareholder) }
+let key_exists = simple_map::spec_contains_key(self.shares, shareholder);
+ensures result == if (key_exists) { simple_map::spec_get(self.shares, shareholder) }
 else { new_shares };
 
@@ -1086,13 +1060,13 @@ Return the number of coins shares are worth in pool wi
schema AddSharesAbortsIf {
-    pool: Pool;
+    self: Pool;
     shareholder: address;
     new_shares: u64;
-    let key_exists = simple_map::spec_contains_key(pool.shares, shareholder);
-    let current_shares = simple_map::spec_get(pool.shares, shareholder);
+    let key_exists = simple_map::spec_contains_key(self.shares, shareholder);
+    let current_shares = simple_map::spec_get(self.shares, shareholder);
     aborts_if key_exists && current_shares + new_shares > MAX_U64;
-    aborts_if !key_exists && new_shares > 0 && len(pool.shareholders) >= pool.shareholders_limit;
+    aborts_if !key_exists && new_shares > 0 && len(self.shareholders) >= self.shareholders_limit;
 }
 
@@ -1103,17 +1077,17 @@ Return the number of coins shares are worth in pool wi
schema AddSharesEnsures {
-    pool: Pool;
+    self: Pool;
     shareholder: address;
     new_shares: u64;
-    let key_exists = simple_map::spec_contains_key(pool.shares, shareholder);
-    let current_shares = simple_map::spec_get(pool.shares, shareholder);
+    let key_exists = simple_map::spec_contains_key(self.shares, shareholder);
+    let current_shares = simple_map::spec_get(self.shares, shareholder);
     ensures key_exists ==>
-        pool.shares == simple_map::spec_set(old(pool.shares), shareholder, current_shares + new_shares);
+        self.shares == simple_map::spec_set(old(self.shares), shareholder, current_shares + new_shares);
     ensures (!key_exists && new_shares > 0) ==>
-        pool.shares == simple_map::spec_set(old(pool.shares), shareholder, new_shares);
+        self.shares == simple_map::spec_set(old(self.shares), shareholder, new_shares);
     ensures (!key_exists && new_shares > 0) ==>
-        vector::eq_push_back(pool.shareholders, old(pool.shareholders), shareholder);
+        vector::eq_push_back(self.shareholders, old(self.shareholders), shareholder);
 }
 
@@ -1140,20 +1114,22 @@ Return the number of coins shares are worth in pool wi ### Function `redeem_shares` -
public fun redeem_shares(pool: &mut pool_u64::Pool, shareholder: address, shares_to_redeem: u64): u64
+
public fun redeem_shares(self: &mut pool_u64::Pool, shareholder: address, shares_to_redeem: u64): u64
 
-
let redeemed_coins = spec_shares_to_amount_with_total_coins(pool, shares_to_redeem, pool.total_coins);
-aborts_if !spec_contains(pool, shareholder);
-aborts_if spec_shares(pool, shareholder) < shares_to_redeem;
-aborts_if pool.total_coins < redeemed_coins;
-aborts_if pool.total_shares < shares_to_redeem;
-ensures pool.total_coins == old(pool.total_coins) - redeemed_coins;
-ensures pool.total_shares == old(pool.total_shares) - shares_to_redeem;
-include shares_to_redeem > 0 ==> DeductSharesEnsures { num_shares: shares_to_redeem };
+
let redeemed_coins = spec_shares_to_amount_with_total_coins(self, shares_to_redeem, self.total_coins);
+aborts_if !spec_contains(self, shareholder);
+aborts_if spec_shares(self, shareholder) < shares_to_redeem;
+aborts_if self.total_coins < redeemed_coins;
+aborts_if self.total_shares < shares_to_redeem;
+ensures self.total_coins == old(self.total_coins) - redeemed_coins;
+ensures self.total_shares == old(self.total_shares) - shares_to_redeem;
+include shares_to_redeem > 0 ==> DeductSharesEnsures {
+    num_shares: shares_to_redeem
+};
 ensures result == redeemed_coins;
 
@@ -1164,15 +1140,15 @@ Return the number of coins shares are worth in pool wi ### Function `transfer_shares` -
public fun transfer_shares(pool: &mut pool_u64::Pool, shareholder_1: address, shareholder_2: address, shares_to_transfer: u64)
+
public fun transfer_shares(self: &mut pool_u64::Pool, shareholder_1: address, shareholder_2: address, shares_to_transfer: u64)
 
pragma aborts_if_is_partial;
-aborts_if !spec_contains(pool, shareholder_1);
-aborts_if spec_shares(pool, shareholder_1) < shares_to_transfer;
+aborts_if !spec_contains(self, shareholder_1);
+aborts_if spec_shares(self, shareholder_1) < shares_to_transfer;
 
@@ -1182,17 +1158,17 @@ Return the number of coins shares are worth in pool wi ### Function `deduct_shares` -
fun deduct_shares(pool: &mut pool_u64::Pool, shareholder: address, num_shares: u64): u64
+
fun deduct_shares(self: &mut pool_u64::Pool, shareholder: address, num_shares: u64): u64
 
-
aborts_if !spec_contains(pool, shareholder);
-aborts_if spec_shares(pool, shareholder) < num_shares;
+
aborts_if !spec_contains(self, shareholder);
+aborts_if spec_shares(self, shareholder) < num_shares;
 include DeductSharesEnsures;
-let remaining_shares = simple_map::spec_get(pool.shares, shareholder) - num_shares;
-ensures remaining_shares > 0 ==> result == simple_map::spec_get(pool.shares, shareholder);
+let remaining_shares = simple_map::spec_get(self.shares, shareholder) - num_shares;
+ensures remaining_shares > 0 ==> result == simple_map::spec_get(self.shares, shareholder);
 ensures remaining_shares == 0 ==> result == 0;
 
@@ -1203,13 +1179,13 @@ Return the number of coins shares are worth in pool wi
schema DeductSharesEnsures {
-    pool: Pool;
+    self: Pool;
     shareholder: address;
     num_shares: u64;
-    let remaining_shares = simple_map::spec_get(pool.shares, shareholder) - num_shares;
-    ensures remaining_shares > 0 ==> simple_map::spec_get(pool.shares, shareholder) == remaining_shares;
-    ensures remaining_shares == 0 ==> !simple_map::spec_contains_key(pool.shares, shareholder);
-    ensures remaining_shares == 0 ==> !vector::spec_contains(pool.shareholders, shareholder);
+    let remaining_shares = simple_map::spec_get(self.shares, shareholder) - num_shares;
+    ensures remaining_shares > 0 ==> simple_map::spec_get(self.shares, shareholder) == remaining_shares;
+    ensures remaining_shares == 0 ==> !simple_map::spec_contains_key(self.shares, shareholder);
+    ensures remaining_shares == 0 ==> !vector::spec_contains(self.shareholders, shareholder);
 }
 
@@ -1220,18 +1196,18 @@ Return the number of coins shares are worth in pool wi ### Function `amount_to_shares_with_total_coins` -
public fun amount_to_shares_with_total_coins(pool: &pool_u64::Pool, coins_amount: u64, total_coins: u64): u64
+
public fun amount_to_shares_with_total_coins(self: &pool_u64::Pool, coins_amount: u64, total_coins: u64): u64
 
-
aborts_if pool.total_coins > 0 && pool.total_shares > 0
-    && (coins_amount * pool.total_shares) / total_coins > MAX_U64;
-aborts_if (pool.total_coins == 0 || pool.total_shares == 0)
-    && coins_amount * pool.scaling_factor > MAX_U64;
-aborts_if pool.total_coins > 0 && pool.total_shares > 0 && total_coins == 0;
-ensures result == spec_amount_to_shares_with_total_coins(pool, coins_amount, total_coins);
+
aborts_if self.total_coins > 0 && self.total_shares > 0
+    && (coins_amount * self.total_shares) / total_coins > MAX_U64;
+aborts_if (self.total_coins == 0 || self.total_shares == 0)
+    && coins_amount * self.scaling_factor > MAX_U64;
+aborts_if self.total_coins > 0 && self.total_shares > 0 && total_coins == 0;
+ensures result == spec_amount_to_shares_with_total_coins(self, coins_amount, total_coins);
 
@@ -1241,15 +1217,15 @@ Return the number of coins shares are worth in pool wi ### Function `shares_to_amount_with_total_coins` -
public fun shares_to_amount_with_total_coins(pool: &pool_u64::Pool, shares: u64, total_coins: u64): u64
+
public fun shares_to_amount_with_total_coins(self: &pool_u64::Pool, shares: u64, total_coins: u64): u64
 
-
aborts_if pool.total_coins > 0 && pool.total_shares > 0
-    && (shares * total_coins) / pool.total_shares > MAX_U64;
-ensures result == spec_shares_to_amount_with_total_coins(pool, shares, total_coins);
+
aborts_if self.total_coins > 0 && self.total_shares > 0
+    && (shares * total_coins) / self.total_shares > MAX_U64;
+ensures result == spec_shares_to_amount_with_total_coins(self, shares, total_coins);
 
@@ -1275,7 +1251,7 @@ Return the number of coins shares are worth in pool wi ### Function `multiply_then_divide` -
public fun multiply_then_divide(_pool: &pool_u64::Pool, x: u64, y: u64, z: u64): u64
+
public fun multiply_then_divide(self: &pool_u64::Pool, x: u64, y: u64, z: u64): u64
 
diff --git a/aptos-move/framework/aptos-stdlib/doc/pool_u64_unbound.md b/aptos-move/framework/aptos-stdlib/doc/pool_u64_unbound.md index 281fb751e7dfd..32bf0190c148e 100644 --- a/aptos-move/framework/aptos-stdlib/doc/pool_u64_unbound.md +++ b/aptos-move/framework/aptos-stdlib/doc/pool_u64_unbound.md @@ -43,8 +43,6 @@ shares left. - [Function `shares_to_amount_with_total_coins`](#0x1_pool_u64_unbound_shares_to_amount_with_total_coins) - [Function `shares_to_amount_with_total_stats`](#0x1_pool_u64_unbound_shares_to_amount_with_total_stats) - [Function `multiply_then_divide`](#0x1_pool_u64_unbound_multiply_then_divide) -- [Function `to_u128`](#0x1_pool_u64_unbound_to_u128) -- [Function `to_u256`](#0x1_pool_u64_unbound_to_u256) - [Specification](#@Specification_1) - [Struct `Pool`](#@Specification_1_Pool) - [Function `contains`](#@Specification_1_contains) @@ -58,8 +56,6 @@ shares left. - [Function `amount_to_shares_with_total_coins`](#@Specification_1_amount_to_shares_with_total_coins) - [Function `shares_to_amount_with_total_coins`](#@Specification_1_shares_to_amount_with_total_coins) - [Function `multiply_then_divide`](#@Specification_1_multiply_then_divide) - - [Function `to_u128`](#@Specification_1_to_u128) - - [Function `to_u256`](#@Specification_1_to_u256)
use 0x1::error;
@@ -296,7 +292,7 @@ Create a new pool with custom scaling_factor.
 Destroy an empty pool. This will fail if the pool has any balance of coins.
 
 
-
public fun destroy_empty(pool: pool_u64_unbound::Pool)
+
public fun destroy_empty(self: pool_u64_unbound::Pool)
 
@@ -305,15 +301,15 @@ Destroy an empty pool. This will fail if the pool has any balance of coins. Implementation -
public fun destroy_empty(pool: Pool) {
-    assert!(pool.total_coins == 0, error::invalid_state(EPOOL_IS_NOT_EMPTY));
+
public fun destroy_empty(self: Pool) {
+    assert!(self.total_coins == 0, error::invalid_state(EPOOL_IS_NOT_EMPTY));
     let Pool {
         total_coins: _,
         total_shares: _,
         shares,
         scaling_factor: _,
-    } = pool;
-    table::destroy_empty<address, u128>(shares);
+    } = self;
+    shares.destroy_empty::<address, u128>();
 }
 
@@ -325,10 +321,10 @@ Destroy an empty pool. This will fail if the pool has any balance of coins. ## Function `total_coins` -Return pool's total balance of coins. +Return self's total balance of coins. -
public fun total_coins(pool: &pool_u64_unbound::Pool): u64
+
public fun total_coins(self: &pool_u64_unbound::Pool): u64
 
@@ -337,8 +333,8 @@ Return pool's total balance of coins. Implementation -
public fun total_coins(pool: &Pool): u64 {
-    pool.total_coins
+
public fun total_coins(self: &Pool): u64 {
+    self.total_coins
 }
 
@@ -350,10 +346,10 @@ Return pool's total balance of coins. ## Function `total_shares` -Return the total number of shares across all shareholders in pool. +Return the total number of shares across all shareholders in self. -
public fun total_shares(pool: &pool_u64_unbound::Pool): u128
+
public fun total_shares(self: &pool_u64_unbound::Pool): u128
 
@@ -362,8 +358,8 @@ Return the total number of shares across all shareholders in pool. Implementation -
public fun total_shares(pool: &Pool): u128 {
-    pool.total_shares
+
public fun total_shares(self: &Pool): u128 {
+    self.total_shares
 }
 
@@ -375,10 +371,10 @@ Return the total number of shares across all shareholders in pool. ## Function `contains` -Return true if shareholder is in pool. +Return true if shareholder is in self. -
public fun contains(pool: &pool_u64_unbound::Pool, shareholder: address): bool
+
public fun contains(self: &pool_u64_unbound::Pool, shareholder: address): bool
 
@@ -387,8 +383,8 @@ Return true if shareholder is in pool. Implementation -
public fun contains(pool: &Pool, shareholder: address): bool {
-    table::contains(&pool.shares, shareholder)
+
public fun contains(self: &Pool, shareholder: address): bool {
+    self.shares.contains(shareholder)
 }
 
@@ -400,10 +396,10 @@ Return true if shareholder is in pool. ## Function `shares` -Return the number of shares of stakeholder in pool. +Return the number of shares of stakeholder in self. -
public fun shares(pool: &pool_u64_unbound::Pool, shareholder: address): u128
+
public fun shares(self: &pool_u64_unbound::Pool, shareholder: address): u128
 
@@ -412,9 +408,9 @@ Return the number of shares of stakeholder in pool. Implementation -
public fun shares(pool: &Pool, shareholder: address): u128 {
-    if (contains(pool, shareholder)) {
-        *table::borrow(&pool.shares, shareholder)
+
public fun shares(self: &Pool, shareholder: address): u128 {
+    if (self.contains(shareholder)) {
+        *self.shares.borrow(shareholder)
     } else {
         0
     }
@@ -429,10 +425,10 @@ Return the number of shares of stakeholder in pool.
 
 ## Function `balance`
 
-Return the balance in coins of shareholder in pool.
+Return the balance in coins of shareholder in self.
 
 
-
public fun balance(pool: &pool_u64_unbound::Pool, shareholder: address): u64
+
public fun balance(self: &pool_u64_unbound::Pool, shareholder: address): u64
 
@@ -441,9 +437,9 @@ Return the balance in coins of shareholder in pool. Implementation -
public fun balance(pool: &Pool, shareholder: address): u64 {
-    let num_shares = shares(pool, shareholder);
-    shares_to_amount(pool, num_shares)
+
public fun balance(self: &Pool, shareholder: address): u64 {
+    let num_shares = self.shares(shareholder);
+    self.shares_to_amount(num_shares)
 }
 
@@ -455,10 +451,10 @@ Return the balance in coins of shareholder in pool. ## Function `shareholders_count` -Return the number of shareholders in pool. +Return the number of shareholders in self. -
public fun shareholders_count(pool: &pool_u64_unbound::Pool): u64
+
public fun shareholders_count(self: &pool_u64_unbound::Pool): u64
 
@@ -467,8 +463,8 @@ Return the number of shareholders in pool. Implementation -
public fun shareholders_count(pool: &Pool): u64 {
-    table::length(&pool.shares)
+
public fun shareholders_count(self: &Pool): u64 {
+    self.shares.length()
 }
 
@@ -480,10 +476,10 @@ Return the number of shareholders in pool. ## Function `update_total_coins` -Update pool's total balance of coins. +Update self's total balance of coins. -
public fun update_total_coins(pool: &mut pool_u64_unbound::Pool, new_total_coins: u64)
+
public fun update_total_coins(self: &mut pool_u64_unbound::Pool, new_total_coins: u64)
 
@@ -492,8 +488,8 @@ Update pool's total balance of coins. Implementation -
public fun update_total_coins(pool: &mut Pool, new_total_coins: u64) {
-    pool.total_coins = new_total_coins;
+
public fun update_total_coins(self: &mut Pool, new_total_coins: u64) {
+    self.total_coins = new_total_coins;
 }
 
@@ -508,7 +504,7 @@ Update pool's total balance of coins. Allow an existing or new shareholder to add their coins to the pool in exchange for new shares. -
public fun buy_in(pool: &mut pool_u64_unbound::Pool, shareholder: address, coins_amount: u64): u128
+
public fun buy_in(self: &mut pool_u64_unbound::Pool, shareholder: address, coins_amount: u64): u128
 
@@ -517,16 +513,16 @@ Allow an existing or new shareholder to add their coins to the pool in exchange Implementation -
public fun buy_in(pool: &mut Pool, shareholder: address, coins_amount: u64): u128 {
+
public fun buy_in(self: &mut Pool, shareholder: address, coins_amount: u64): u128 {
     if (coins_amount == 0) return 0;
 
-    let new_shares = amount_to_shares(pool, coins_amount);
-    assert!(MAX_U64 - pool.total_coins >= coins_amount, error::invalid_argument(EPOOL_TOTAL_COINS_OVERFLOW));
-    assert!(MAX_U128 - pool.total_shares >= new_shares, error::invalid_argument(EPOOL_TOTAL_SHARES_OVERFLOW));
+    let new_shares = self.amount_to_shares(coins_amount);
+    assert!(MAX_U64 - self.total_coins >= coins_amount, error::invalid_argument(EPOOL_TOTAL_COINS_OVERFLOW));
+    assert!(MAX_U128 - self.total_shares >= new_shares, error::invalid_argument(EPOOL_TOTAL_SHARES_OVERFLOW));
 
-    pool.total_coins = pool.total_coins + coins_amount;
-    pool.total_shares = pool.total_shares + new_shares;
-    add_shares(pool, shareholder, new_shares);
+    self.total_coins += coins_amount;
+    self.total_shares += new_shares;
+    self.add_shares(shareholder, new_shares);
     new_shares
 }
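
For orientation, here is a minimal sketch of the buy-in flow, assuming the receiver-style calls shown above, the module's create_with_scaling_factor constructor, and a hypothetical test module at 0xcafe:

```move
#[test_only]
module 0xcafe::pool_u64_unbound_example {
    use aptos_std::pool_u64_unbound;

    #[test]
    fun buy_in_and_redeem() {
        // Empty pool with scaling factor 1: the first buy-in mints one share per coin.
        let pool = pool_u64_unbound::create_with_scaling_factor(1);
        let shares = pool.buy_in(@0xA11CE, 100);
        assert!(shares == 100, 0);
        assert!(pool.balance(@0xA11CE) == 100, 1);

        // Redeeming every share drains the pool, after which it can be destroyed.
        let coins = pool.redeem_shares(@0xA11CE, shares);
        assert!(coins == 100, 2);
        pool.destroy_empty();
    }
}
```

Subsequent buy-ins are priced at the current total_coins / total_shares ratio, so later participants do not dilute earlier ones.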
 
@@ -539,11 +535,11 @@ Allow an existing or new shareholder to add their coins to the pool in exchange ## Function `add_shares` -Add the number of shares directly for shareholder in pool. +Add the number of shares directly for shareholder in self. This would dilute other shareholders if the pool's balance of coins didn't change. -
fun add_shares(pool: &mut pool_u64_unbound::Pool, shareholder: address, new_shares: u128): u128
+
fun add_shares(self: &mut pool_u64_unbound::Pool, shareholder: address, new_shares: u128): u128
 
@@ -552,16 +548,16 @@ This would dilute other shareholders if the pool's balance of coins didn't chang Implementation -
fun add_shares(pool: &mut Pool, shareholder: address, new_shares: u128): u128 {
-    if (contains(pool, shareholder)) {
-        let existing_shares = table::borrow_mut(&mut pool.shares, shareholder);
+
fun add_shares(self: &mut Pool, shareholder: address, new_shares: u128): u128 {
+    if (self.contains(shareholder)) {
+        let existing_shares = self.shares.borrow_mut(shareholder);
         let current_shares = *existing_shares;
         assert!(MAX_U128 - current_shares >= new_shares, error::invalid_argument(ESHAREHOLDER_SHARES_OVERFLOW));
 
         *existing_shares = current_shares + new_shares;
         *existing_shares
     } else if (new_shares > 0) {
-        table::add(&mut pool.shares, shareholder, new_shares);
+        self.shares.add(shareholder, new_shares);
         new_shares
     } else {
         new_shares
@@ -577,10 +573,10 @@ This would dilute other shareholders if the pool's balance of coins didn't chang
 
 ## Function `redeem_shares`
 
-Allow shareholder to redeem their shares in pool for coins.
+Allow shareholder to redeem their shares in self for coins.
 
 
-
public fun redeem_shares(pool: &mut pool_u64_unbound::Pool, shareholder: address, shares_to_redeem: u128): u64
+
public fun redeem_shares(self: &mut pool_u64_unbound::Pool, shareholder: address, shares_to_redeem: u128): u64
 
@@ -589,16 +585,16 @@ Allow shareholder to redeem their shares in pool for c Implementation -
public fun redeem_shares(pool: &mut Pool, shareholder: address, shares_to_redeem: u128): u64 {
-    assert!(contains(pool, shareholder), error::invalid_argument(ESHAREHOLDER_NOT_FOUND));
-    assert!(shares(pool, shareholder) >= shares_to_redeem, error::invalid_argument(EINSUFFICIENT_SHARES));
+
public fun redeem_shares(self: &mut Pool, shareholder: address, shares_to_redeem: u128): u64 {
+    assert!(self.contains(shareholder), error::invalid_argument(ESHAREHOLDER_NOT_FOUND));
+    assert!(self.shares(shareholder) >= shares_to_redeem, error::invalid_argument(EINSUFFICIENT_SHARES));
 
     if (shares_to_redeem == 0) return 0;
 
-    let redeemed_coins = shares_to_amount(pool, shares_to_redeem);
-    pool.total_coins = pool.total_coins - redeemed_coins;
-    pool.total_shares = pool.total_shares - shares_to_redeem;
-    deduct_shares(pool, shareholder, shares_to_redeem);
+    let redeemed_coins = self.shares_to_amount(shares_to_redeem);
+    self.total_coins -= redeemed_coins;
+    self.total_shares -= shares_to_redeem;
+    self.deduct_shares(shareholder, shares_to_redeem);
 
     redeemed_coins
 }
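
For intuition with concrete numbers: if the pool holds total_coins = 1000 backed by total_shares = 2000, redeeming 500 shares pays out 500 * 1000 / 2000 = 250 coins and leaves 750 coins against 1500 shares, so the price per share is unchanged by the redemption.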
@@ -615,7 +611,7 @@ Allow shareholder to redeem their shares in pool for c
 Transfer shares from shareholder_1 to shareholder_2.
 
 
-
public fun transfer_shares(pool: &mut pool_u64_unbound::Pool, shareholder_1: address, shareholder_2: address, shares_to_transfer: u128)
+
public fun transfer_shares(self: &mut pool_u64_unbound::Pool, shareholder_1: address, shareholder_2: address, shares_to_transfer: u128)
 
@@ -625,17 +621,17 @@ Transfer shares from shareholder_1 to shareholder_2.
public fun transfer_shares(
-    pool: &mut Pool,
+    self: &mut Pool,
     shareholder_1: address,
     shareholder_2: address,
     shares_to_transfer: u128,
 ) {
-    assert!(contains(pool, shareholder_1), error::invalid_argument(ESHAREHOLDER_NOT_FOUND));
-    assert!(shares(pool, shareholder_1) >= shares_to_transfer, error::invalid_argument(EINSUFFICIENT_SHARES));
+    assert!(self.contains(shareholder_1), error::invalid_argument(ESHAREHOLDER_NOT_FOUND));
+    assert!(self.shares(shareholder_1) >= shares_to_transfer, error::invalid_argument(EINSUFFICIENT_SHARES));
     if (shares_to_transfer == 0) return;
 
-    deduct_shares(pool, shareholder_1, shares_to_transfer);
-    add_shares(pool, shareholder_2, shares_to_transfer);
+    self.deduct_shares(shareholder_1, shares_to_transfer);
+    self.add_shares(shareholder_2, shares_to_transfer);
 }
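
Note that transfer_shares only re-attributes existing shares; total_coins and total_shares are untouched. If shareholder_1 transfers their entire balance, deduct_shares removes them from the shares table, and add_shares creates (or tops up) shareholder_2's entry, which is exactly the behavior the specification pins down further below.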
 
@@ -647,10 +643,10 @@ Transfer shares from shareholder_1 to shareholder_2. ## Function `deduct_shares` -Directly deduct shareholder's number of shares in pool and return the number of remaining shares. +Directly deduct shareholder's number of shares in self and return the number of remaining shares. -
fun deduct_shares(pool: &mut pool_u64_unbound::Pool, shareholder: address, num_shares: u128): u128
+
fun deduct_shares(self: &mut pool_u64_unbound::Pool, shareholder: address, num_shares: u128): u128
 
@@ -659,17 +655,17 @@ Directly deduct shareholder's number of shares in pool Implementation -
fun deduct_shares(pool: &mut Pool, shareholder: address, num_shares: u128): u128 {
-    assert!(contains(pool, shareholder), error::invalid_argument(ESHAREHOLDER_NOT_FOUND));
-    assert!(shares(pool, shareholder) >= num_shares, error::invalid_argument(EINSUFFICIENT_SHARES));
+
fun deduct_shares(self: &mut Pool, shareholder: address, num_shares: u128): u128 {
+    assert!(self.contains(shareholder), error::invalid_argument(ESHAREHOLDER_NOT_FOUND));
+    assert!(self.shares(shareholder) >= num_shares, error::invalid_argument(EINSUFFICIENT_SHARES));
 
-    let existing_shares = table::borrow_mut(&mut pool.shares, shareholder);
-    *existing_shares = *existing_shares - num_shares;
+    let existing_shares = self.shares.borrow_mut(shareholder);
+    *existing_shares -= num_shares;
 
     // Remove the shareholder completely if they have no shares left.
     let remaining_shares = *existing_shares;
     if (remaining_shares == 0) {
-        table::remove(&mut pool.shares, shareholder);
+        self.shares.remove(shareholder);
     };
 
     remaining_shares
@@ -684,11 +680,11 @@ Directly deduct shareholder's number of shares in pool
 
 ## Function `amount_to_shares`
 
-Return the number of new shares coins_amount can buy in pool.
+Return the number of new shares coins_amount can buy in self.
 amount needs to be big enough to avoid rounding errors.
 
 
-
public fun amount_to_shares(pool: &pool_u64_unbound::Pool, coins_amount: u64): u128
+
public fun amount_to_shares(self: &pool_u64_unbound::Pool, coins_amount: u64): u128
 
@@ -697,8 +693,8 @@ Return the number of new shares coins_amount can buy in pool< Implementation -
public fun amount_to_shares(pool: &Pool, coins_amount: u64): u128 {
-    amount_to_shares_with_total_coins(pool, coins_amount, pool.total_coins)
+
public fun amount_to_shares(self: &Pool, coins_amount: u64): u128 {
+    self.amount_to_shares_with_total_coins(coins_amount, self.total_coins)
 }
 
@@ -710,11 +706,11 @@ Return the number of new shares coins_amount can buy in pool<

## Function `amount_to_shares_with_total_coins`

-Return the number of new shares coins_amount can buy in pool with a custom total coins number.
+Return the number of new shares coins_amount can buy in self with a custom total coins number.
 amount needs to be big enough to avoid rounding errors.


-
public fun amount_to_shares_with_total_coins(pool: &pool_u64_unbound::Pool, coins_amount: u64, total_coins: u64): u128
+
public fun amount_to_shares_with_total_coins(self: &pool_u64_unbound::Pool, coins_amount: u64, total_coins: u64): u128
 
@@ -723,17 +719,17 @@ Return the number of new shares coins_amount can buy in pool< Implementation -
public fun amount_to_shares_with_total_coins(pool: &Pool, coins_amount: u64, total_coins: u64): u128 {
+
public fun amount_to_shares_with_total_coins(self: &Pool, coins_amount: u64, total_coins: u64): u128 {
     // No shares yet so amount is worth the same number of shares.
-    if (pool.total_coins == 0 || pool.total_shares == 0) {
+    if (self.total_coins == 0 || self.total_shares == 0) {
         // Multiply by scaling factor to minimize rounding errors during internal calculations for buy ins/redeems.
         // This can overflow but scaling factor is expected to be chosen carefully so this would not overflow.
-        to_u128(coins_amount) * to_u128(pool.scaling_factor)
+        (coins_amount as u128) * (self.scaling_factor as u128)
     } else {
         // Shares price = total_coins / total existing shares.
         // New number of shares = new_amount / shares_price = new_amount * existing_shares / total_amount.
         // We rearrange the calc and do multiplication first to avoid rounding errors.
-        multiply_then_divide(pool, to_u128(coins_amount), pool.total_shares, to_u128(total_coins))
+        self.multiply_then_divide(coins_amount as u128, self.total_shares, total_coins as u128)
     }
 }
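
As a worked example of the second branch: with total_coins = 1000 and total_shares = 2000, a buy-in of coins_amount = 50 mints 50 * 2000 / 1000 = 100 new shares; multiplying before dividing keeps the truncation error below one share.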
 
@@ -746,11 +742,11 @@ Return the number of new shares coins_amount can buy in pool<

## Function `shares_to_amount`

-Return the number of coins shares are worth in pool.
+Return the number of coins shares are worth in self.
 shares needs to be big enough to avoid rounding errors.


-
public fun shares_to_amount(pool: &pool_u64_unbound::Pool, shares: u128): u64
+
public fun shares_to_amount(self: &pool_u64_unbound::Pool, shares: u128): u64
 
@@ -759,8 +755,8 @@ Return the number of coins shares are worth in pool. Implementation -
public fun shares_to_amount(pool: &Pool, shares: u128): u64 {
-    shares_to_amount_with_total_coins(pool, shares, pool.total_coins)
+
public fun shares_to_amount(self: &Pool, shares: u128): u64 {
+    self.shares_to_amount_with_total_coins(shares, self.total_coins)
 }
 
@@ -772,11 +768,11 @@ Return the number of coins shares are worth in pool.

## Function `shares_to_amount_with_total_coins`

-Return the number of coins shares are worth in pool with a custom total coins number.
+Return the number of coins shares are worth in self with a custom total coins number.
 shares needs to be big enough to avoid rounding errors.


-
public fun shares_to_amount_with_total_coins(pool: &pool_u64_unbound::Pool, shares: u128, total_coins: u64): u64
+
public fun shares_to_amount_with_total_coins(self: &pool_u64_unbound::Pool, shares: u128, total_coins: u64): u64
 
@@ -785,15 +781,15 @@ Return the number of coins shares are worth in pool wi Implementation -
public fun shares_to_amount_with_total_coins(pool: &Pool, shares: u128, total_coins: u64): u64 {
+
public fun shares_to_amount_with_total_coins(self: &Pool, shares: u128, total_coins: u64): u64 {
     // No shares or coins yet so shares are worthless.
-    if (pool.total_coins == 0 || pool.total_shares == 0) {
+    if (self.total_coins == 0 || self.total_shares == 0) {
         0
     } else {
         // Shares price = total_coins / total existing shares.
         // Shares worth = shares * shares price = shares * total_coins / total existing shares.
         // We rearrange the calc and do multiplication first to avoid rounding errors.
-        (multiply_then_divide(pool, shares, to_u128(total_coins), pool.total_shares) as u64)
+        (self.multiply_then_divide(shares, total_coins as u128, self.total_shares) as u64)
     }
 }
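
Conversely, with the same pool state, 100 shares are worth 100 * 1000 / 2000 = 50 coins; any fractional remainder is truncated, so a redeemer can receive slightly less than the exact pro-rata amount, never more.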
 
@@ -809,7 +805,7 @@ Return the number of coins shares are worth in pool wi Return the number of coins shares are worth in pool with custom total coins and shares numbers. -
public fun shares_to_amount_with_total_stats(pool: &pool_u64_unbound::Pool, shares: u128, total_coins: u64, total_shares: u128): u64
+
public fun shares_to_amount_with_total_stats(self: &pool_u64_unbound::Pool, shares: u128, total_coins: u64, total_shares: u128): u64
 
@@ -819,15 +815,15 @@ Return the number of coins shares are worth in pool wi
public fun shares_to_amount_with_total_stats(
-    pool: &Pool,
+    self: &Pool,
     shares: u128,
     total_coins: u64,
     total_shares: u128,
 ): u64 {
-    if (pool.total_coins == 0 || total_shares == 0) {
+    if (self.total_coins == 0 || total_shares == 0) {
         0
     } else {
-        (multiply_then_divide(pool, shares, to_u128(total_coins), total_shares) as u64)
+        (self.multiply_then_divide(shares, total_coins as u128, total_shares) as u64)
     }
 }
 
@@ -842,7 +838,7 @@ Return the number of coins shares are worth in pool wi -
public fun multiply_then_divide(_pool: &pool_u64_unbound::Pool, x: u128, y: u128, z: u128): u128
+
public fun multiply_then_divide(self: &pool_u64_unbound::Pool, x: u128, y: u128, z: u128): u128
 
@@ -851,57 +847,8 @@ Return the number of coins shares are worth in pool wi Implementation -
public fun multiply_then_divide(_pool: &Pool, x: u128, y: u128, z: u128): u128 {
-    let result = (to_u256(x) * to_u256(y)) / to_u256(z);
-    (result as u128)
-}
-
- - - -
- - - -## Function `to_u128` - - - -
fun to_u128(num: u64): u128
-
- - - -
-Implementation - - -
fun to_u128(num: u64): u128 {
-    (num as u128)
-}
-
- - - -
- - - -## Function `to_u256` - - - -
fun to_u256(num: u128): u256
-
- - - -
-Implementation - - -
fun to_u256(num: u128): u256 {
-    (num as u256)
+
public fun multiply_then_divide(self: &Pool, x: u128, y: u128, z: u128): u128 {
+    math128::mul_div(x, y, z)
 }
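
The refactor above delegates the widening to math128::mul_div, which is expected to compute the product at higher precision before dividing, just like the removed to_u256 path. A quick sanity check of why that matters: mul_div(2^100, 2^30, 2^40) = 2^90 fits comfortably in u128, whereas computing 2^100 * 2^30 = 2^130 directly would overflow u128.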
 
@@ -975,14 +922,14 @@ Return the number of coins shares are worth in pool wi ### Function `contains` -
public fun contains(pool: &pool_u64_unbound::Pool, shareholder: address): bool
+
public fun contains(self: &pool_u64_unbound::Pool, shareholder: address): bool
 
aborts_if false;
-ensures result == spec_contains(pool, shareholder);
+ensures result == spec_contains(self, shareholder);
 
@@ -1008,14 +955,14 @@ Return the number of coins shares are worth in pool wi ### Function `shares` -
public fun shares(pool: &pool_u64_unbound::Pool, shareholder: address): u128
+
public fun shares(self: &pool_u64_unbound::Pool, shareholder: address): u128
 
aborts_if false;
-ensures result == spec_shares(pool, shareholder);
+ensures result == spec_shares(self, shareholder);
 
@@ -1025,16 +972,16 @@ Return the number of coins shares are worth in pool wi ### Function `balance` -
public fun balance(pool: &pool_u64_unbound::Pool, shareholder: address): u64
+
public fun balance(self: &pool_u64_unbound::Pool, shareholder: address): u64
 
-
let shares = spec_shares(pool, shareholder);
-let total_coins = pool.total_coins;
-aborts_if pool.total_coins > 0 && pool.total_shares > 0 && (shares * total_coins) / pool.total_shares > MAX_U64;
-ensures result == spec_shares_to_amount_with_total_coins(pool, shares, total_coins);
+
let shares = spec_shares(self, shareholder);
+let total_coins = self.total_coins;
+aborts_if self.total_coins > 0 && self.total_shares > 0 && (shares * total_coins) / self.total_shares > MAX_U64;
+ensures result == spec_shares_to_amount_with_total_coins(self, shares, total_coins);
 
@@ -1044,19 +991,19 @@ Return the number of coins shares are worth in pool wi ### Function `buy_in` -
public fun buy_in(pool: &mut pool_u64_unbound::Pool, shareholder: address, coins_amount: u64): u128
+
public fun buy_in(self: &mut pool_u64_unbound::Pool, shareholder: address, coins_amount: u64): u128
 
-
let new_shares = spec_amount_to_shares_with_total_coins(pool, coins_amount, pool.total_coins);
-aborts_if pool.total_coins + coins_amount > MAX_U64;
-aborts_if pool.total_shares + new_shares > MAX_U128;
-include coins_amount > 0 ==> AddSharesAbortsIf { new_shares: new_shares };
-include coins_amount > 0 ==> AddSharesEnsures { new_shares: new_shares };
-ensures pool.total_coins == old(pool.total_coins) + coins_amount;
-ensures pool.total_shares == old(pool.total_shares) + new_shares;
+
let new_shares = spec_amount_to_shares_with_total_coins(self, coins_amount, self.total_coins);
+aborts_if self.total_coins + coins_amount > MAX_U64;
+aborts_if self.total_shares + new_shares > MAX_U128;
+include coins_amount > 0 ==> AddSharesAbortsIf { new_shares };
+include coins_amount > 0 ==> AddSharesEnsures { new_shares };
+ensures self.total_coins == old(self.total_coins) + coins_amount;
+ensures self.total_shares == old(self.total_shares) + new_shares;
 ensures result == new_shares;
 
@@ -1067,7 +1014,7 @@ Return the number of coins shares are worth in pool wi ### Function `add_shares` -
fun add_shares(pool: &mut pool_u64_unbound::Pool, shareholder: address, new_shares: u128): u128
+
fun add_shares(self: &mut pool_u64_unbound::Pool, shareholder: address, new_shares: u128): u128
 
@@ -1075,8 +1022,8 @@ Return the number of coins shares are worth in pool wi
include AddSharesAbortsIf;
 include AddSharesEnsures;
-let key_exists = table::spec_contains(pool.shares, shareholder);
-ensures result == if (key_exists) { table::spec_get(pool.shares, shareholder) }
+let key_exists = table::spec_contains(self.shares, shareholder);
+ensures result == if (key_exists) { table::spec_get(self.shares, shareholder) }
 else { new_shares };
 
@@ -1087,11 +1034,11 @@ Return the number of coins shares are worth in pool wi
schema AddSharesAbortsIf {
-    pool: Pool;
+    self: Pool;
     shareholder: address;
     new_shares: u64;
-    let key_exists = table::spec_contains(pool.shares, shareholder);
-    let current_shares = table::spec_get(pool.shares, shareholder);
+    let key_exists = table::spec_contains(self.shares, shareholder);
+    let current_shares = table::spec_get(self.shares, shareholder);
     aborts_if key_exists && current_shares + new_shares > MAX_U128;
 }
 
@@ -1103,15 +1050,15 @@ Return the number of coins shares are worth in pool wi
schema AddSharesEnsures {
-    pool: Pool;
+    self: Pool;
     shareholder: address;
     new_shares: u64;
-    let key_exists = table::spec_contains(pool.shares, shareholder);
-    let current_shares = table::spec_get(pool.shares, shareholder);
+    let key_exists = table::spec_contains(self.shares, shareholder);
+    let current_shares = table::spec_get(self.shares, shareholder);
     ensures key_exists ==>
-        pool.shares == table::spec_set(old(pool.shares), shareholder, current_shares + new_shares);
+        self.shares == table::spec_set(old(self.shares), shareholder, current_shares + new_shares);
     ensures (!key_exists && new_shares > 0) ==>
-        pool.shares == table::spec_set(old(pool.shares), shareholder, new_shares);
+        self.shares == table::spec_set(old(self.shares), shareholder, new_shares);
 }
 
@@ -1138,20 +1085,22 @@ Return the number of coins shares are worth in pool wi ### Function `redeem_shares` -
public fun redeem_shares(pool: &mut pool_u64_unbound::Pool, shareholder: address, shares_to_redeem: u128): u64
+
public fun redeem_shares(self: &mut pool_u64_unbound::Pool, shareholder: address, shares_to_redeem: u128): u64
 
-
let redeemed_coins = spec_shares_to_amount_with_total_coins(pool, shares_to_redeem, pool.total_coins);
-aborts_if !spec_contains(pool, shareholder);
-aborts_if spec_shares(pool, shareholder) < shares_to_redeem;
-aborts_if pool.total_coins < redeemed_coins;
-aborts_if pool.total_shares < shares_to_redeem;
-ensures pool.total_coins == old(pool.total_coins) - redeemed_coins;
-ensures pool.total_shares == old(pool.total_shares) - shares_to_redeem;
-include shares_to_redeem > 0 ==> DeductSharesEnsures { num_shares: shares_to_redeem };
+
let redeemed_coins = spec_shares_to_amount_with_total_coins(self, shares_to_redeem, self.total_coins);
+aborts_if !spec_contains(self, shareholder);
+aborts_if spec_shares(self, shareholder) < shares_to_redeem;
+aborts_if self.total_coins < redeemed_coins;
+aborts_if self.total_shares < shares_to_redeem;
+ensures self.total_coins == old(self.total_coins) - redeemed_coins;
+ensures self.total_shares == old(self.total_shares) - shares_to_redeem;
+include shares_to_redeem > 0 ==> DeductSharesEnsures {
+    num_shares: shares_to_redeem
+};
 ensures result == redeemed_coins;
 
@@ -1162,27 +1111,28 @@ Return the number of coins shares are worth in pool wi ### Function `transfer_shares` -
public fun transfer_shares(pool: &mut pool_u64_unbound::Pool, shareholder_1: address, shareholder_2: address, shares_to_transfer: u128)
+
public fun transfer_shares(self: &mut pool_u64_unbound::Pool, shareholder_1: address, shareholder_2: address, shares_to_transfer: u128)
 
-
aborts_if (shareholder_1 != shareholder_2) && shares_to_transfer > 0 && spec_contains(pool, shareholder_2) &&
-    (spec_shares(pool, shareholder_2) + shares_to_transfer > MAX_U128);
-aborts_if !spec_contains(pool, shareholder_1);
-aborts_if spec_shares(pool, shareholder_1) < shares_to_transfer;
-ensures shareholder_1 == shareholder_2 ==> spec_shares(old(pool), shareholder_1) == spec_shares(pool, shareholder_1);
-ensures ((shareholder_1 != shareholder_2) && (spec_shares(old(pool), shareholder_1) == shares_to_transfer)) ==>
-    !spec_contains(pool, shareholder_1);
+
aborts_if (shareholder_1 != shareholder_2) && shares_to_transfer > 0 && spec_contains(self, shareholder_2) &&
+    (spec_shares(self, shareholder_2) + shares_to_transfer > MAX_U128);
+aborts_if !spec_contains(self, shareholder_1);
+aborts_if spec_shares(self, shareholder_1) < shares_to_transfer;
+ensures shareholder_1 == shareholder_2 ==> spec_shares(old(self), shareholder_1) == spec_shares(
+    self, shareholder_1);
+ensures ((shareholder_1 != shareholder_2) && (spec_shares(old(self), shareholder_1) == shares_to_transfer)) ==>
+    !spec_contains(self, shareholder_1);
 ensures (shareholder_1 != shareholder_2 && shares_to_transfer > 0) ==>
-    (spec_contains(pool, shareholder_2));
-ensures (shareholder_1 != shareholder_2 && shares_to_transfer > 0 && !spec_contains(old(pool), shareholder_2)) ==>
-    (spec_contains(pool, shareholder_2) && spec_shares(pool, shareholder_2) == shares_to_transfer);
-ensures (shareholder_1 != shareholder_2 && shares_to_transfer > 0 && spec_contains(old(pool), shareholder_2)) ==>
-    (spec_contains(pool, shareholder_2) && spec_shares(pool, shareholder_2) == spec_shares(old(pool), shareholder_2) + shares_to_transfer);
-ensures ((shareholder_1 != shareholder_2) && (spec_shares(old(pool), shareholder_1) > shares_to_transfer)) ==>
-    (spec_contains(pool, shareholder_1) && (spec_shares(pool, shareholder_1) == spec_shares(old(pool), shareholder_1) - shares_to_transfer));
+    (spec_contains(self, shareholder_2));
+ensures (shareholder_1 != shareholder_2 && shares_to_transfer > 0 && !spec_contains(old(self), shareholder_2)) ==>
+    (spec_contains(self, shareholder_2) && spec_shares(self, shareholder_2) == shares_to_transfer);
+ensures (shareholder_1 != shareholder_2 && shares_to_transfer > 0 && spec_contains(old(self), shareholder_2)) ==>
+    (spec_contains(self, shareholder_2) && spec_shares(self, shareholder_2) == spec_shares(old(self), shareholder_2) + shares_to_transfer);
+ensures ((shareholder_1 != shareholder_2) && (spec_shares(old(self), shareholder_1) > shares_to_transfer)) ==>
+    (spec_contains(self, shareholder_1) && (spec_shares(self, shareholder_1) == spec_shares(old(self), shareholder_1) - shares_to_transfer));
 
@@ -1192,17 +1142,17 @@ Return the number of coins shares are worth in pool wi ### Function `deduct_shares` -
fun deduct_shares(pool: &mut pool_u64_unbound::Pool, shareholder: address, num_shares: u128): u128
+
fun deduct_shares(self: &mut pool_u64_unbound::Pool, shareholder: address, num_shares: u128): u128
 
-
aborts_if !spec_contains(pool, shareholder);
-aborts_if spec_shares(pool, shareholder) < num_shares;
+
aborts_if !spec_contains(self, shareholder);
+aborts_if spec_shares(self, shareholder) < num_shares;
 include DeductSharesEnsures;
-let remaining_shares = table::spec_get(pool.shares, shareholder) - num_shares;
-ensures remaining_shares > 0 ==> result == table::spec_get(pool.shares, shareholder);
+let remaining_shares = table::spec_get(self.shares, shareholder) - num_shares;
+ensures remaining_shares > 0 ==> result == table::spec_get(self.shares, shareholder);
 ensures remaining_shares == 0 ==> result == 0;
 
@@ -1213,12 +1163,12 @@ Return the number of coins shares are worth in pool wi
schema DeductSharesEnsures {
-    pool: Pool;
+    self: Pool;
     shareholder: address;
     num_shares: u64;
-    let remaining_shares = table::spec_get(pool.shares, shareholder) - num_shares;
-    ensures remaining_shares > 0 ==> table::spec_get(pool.shares, shareholder) == remaining_shares;
-    ensures remaining_shares == 0 ==> !table::spec_contains(pool.shares, shareholder);
+    let remaining_shares = table::spec_get(self.shares, shareholder) - num_shares;
+    ensures remaining_shares > 0 ==> table::spec_get(self.shares, shareholder) == remaining_shares;
+    ensures remaining_shares == 0 ==> !table::spec_contains(self.shares, shareholder);
 }
 
@@ -1229,18 +1179,18 @@ Return the number of coins shares are worth in pool wi ### Function `amount_to_shares_with_total_coins` -
public fun amount_to_shares_with_total_coins(pool: &pool_u64_unbound::Pool, coins_amount: u64, total_coins: u64): u128
+
public fun amount_to_shares_with_total_coins(self: &pool_u64_unbound::Pool, coins_amount: u64, total_coins: u64): u128
 
-
aborts_if pool.total_coins > 0 && pool.total_shares > 0
-    && (coins_amount * pool.total_shares) / total_coins > MAX_U128;
-aborts_if (pool.total_coins == 0 || pool.total_shares == 0)
-    && coins_amount * pool.scaling_factor > MAX_U128;
-aborts_if pool.total_coins > 0 && pool.total_shares > 0 && total_coins == 0;
-ensures result == spec_amount_to_shares_with_total_coins(pool, coins_amount, total_coins);
+
aborts_if self.total_coins > 0 && self.total_shares > 0
+    && (coins_amount * self.total_shares) / total_coins > MAX_U128;
+aborts_if (self.total_coins == 0 || self.total_shares == 0)
+    && coins_amount * self.scaling_factor > MAX_U128;
+aborts_if self.total_coins > 0 && self.total_shares > 0 && total_coins == 0;
+ensures result == spec_amount_to_shares_with_total_coins(self, coins_amount, total_coins);
 
@@ -1250,15 +1200,15 @@ Return the number of coins shares are worth in pool wi ### Function `shares_to_amount_with_total_coins` -
public fun shares_to_amount_with_total_coins(pool: &pool_u64_unbound::Pool, shares: u128, total_coins: u64): u64
+
public fun shares_to_amount_with_total_coins(self: &pool_u64_unbound::Pool, shares: u128, total_coins: u64): u64
 
-
aborts_if pool.total_coins > 0 && pool.total_shares > 0
-    && (shares * total_coins) / pool.total_shares > MAX_U64;
-ensures result == spec_shares_to_amount_with_total_coins(pool, shares, total_coins);
+
aborts_if self.total_coins > 0 && self.total_shares > 0
+    && (shares * total_coins) / self.total_shares > MAX_U64;
+ensures result == spec_shares_to_amount_with_total_coins(self, shares, total_coins);
 
@@ -1284,7 +1234,7 @@ Return the number of coins shares are worth in pool wi ### Function `multiply_then_divide` -
public fun multiply_then_divide(_pool: &pool_u64_unbound::Pool, x: u128, y: u128, z: u128): u128
+
public fun multiply_then_divide(self: &pool_u64_unbound::Pool, x: u128, y: u128, z: u128): u128
 
@@ -1296,38 +1246,4 @@ Return the number of coins shares are worth in pool wi
- - - -### Function `to_u128` - - -
fun to_u128(num: u64): u128
-
- - - - -
aborts_if false;
-ensures result == num;
-
- - - - - -### Function `to_u256` - - -
fun to_u256(num: u128): u256
-
- - - - -
aborts_if false;
-ensures result == num;
-
- - [move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/aptos-stdlib/doc/ristretto255.md b/aptos-move/framework/aptos-stdlib/doc/ristretto255.md index fa43f810f022a..2160077a7a5d8 100644 --- a/aptos-move/framework/aptos-stdlib/doc/ristretto255.md +++ b/aptos-move/framework/aptos-stdlib/doc/ristretto255.md @@ -126,7 +126,6 @@ In the future, we might support additional features: - [Function `scalar_sub_internal`](#0x1_ristretto255_scalar_sub_internal) - [Function `scalar_neg_internal`](#0x1_ristretto255_scalar_neg_internal) - [Specification](#@Specification_1) - - [Helper functions](#@Helper_functions_2) - [Function `point_equals`](#@Specification_1_point_equals) - [Function `double_scalar_mul`](#@Specification_1_double_scalar_mul) - [Function `multi_scalar_mul`](#@Specification_1_multi_scalar_mul) @@ -153,6 +152,7 @@ In the future, we might support additional features: - [Function `scalar_neg`](#@Specification_1_scalar_neg) - [Function `scalar_neg_assign`](#@Specification_1_scalar_neg_assign) - [Function `scalar_to_bytes`](#@Specification_1_scalar_to_bytes) + - [Helper functions](#@Helper_functions_2) - [Function `new_point_from_sha512_internal`](#@Specification_1_new_point_from_sha512_internal) - [Function `new_point_from_64_uniform_bytes_internal`](#@Specification_1_new_point_from_64_uniform_bytes_internal) - [Function `point_is_canonical_internal`](#@Specification_1_point_is_canonical_internal) @@ -729,7 +729,7 @@ can be used to build a collision-resistant hash function that maps 64-byte messa
public fun new_point_from_64_uniform_bytes(bytes: vector<u8>): Option<RistrettoPoint> {
-    if (std::vector::length(&bytes) == 64) {
+    if (bytes.length() == 64) {
         std::option::some(RistrettoPoint {
             handle: new_point_from_64_uniform_bytes_internal(bytes)
         })
@@ -1168,9 +1168,10 @@ This function is much faster than computing each a_i p_i using point_mulpublic fun multi_scalar_mul(points: &vector<RistrettoPoint>, scalars: &vector<Scalar>): RistrettoPoint {
-    assert!(!std::vector::is_empty(points), std::error::invalid_argument(E_ZERO_POINTS));
-    assert!(!std::vector::is_empty(scalars), std::error::invalid_argument(E_ZERO_SCALARS));
-    assert!(std::vector::length(points) == std::vector::length(scalars), std::error::invalid_argument(E_DIFFERENT_NUM_POINTS_AND_SCALARS));
+    assert!(!points.is_empty(), std::error::invalid_argument(E_ZERO_POINTS));
+    assert!(!scalars.is_empty(), std::error::invalid_argument(E_ZERO_SCALARS));
+    assert!(
+        points.length() == scalars.length(), std::error::invalid_argument(E_DIFFERENT_NUM_POINTS_AND_SCALARS));
 
     RistrettoPoint {
         handle: multi_scalar_mul_internal<RistrettoPoint, Scalar>(points, scalars)
@@ -1286,9 +1287,7 @@ Creates a Scalar from an u8.
 
 
public fun new_scalar_from_u8(byte: u8): Scalar {
     let s = scalar_zero();
-    let byte_zero = std::vector::borrow_mut(&mut s.data, 0);
-    *byte_zero = byte;
-
+    s.data[0] = byte;
     s
 }
 
@@ -1395,7 +1394,7 @@ Creates a Scalar from 32 bytes by reducing the little-endian-encoded number in t
public fun new_scalar_reduced_from_32_bytes(bytes: vector<u8>): Option<Scalar> {
-    if (std::vector::length(&bytes) == 32) {
+    if (bytes.length() == 32) {
         std::option::some(Scalar {
             data: scalar_reduced_from_32_bytes_internal(bytes)
         })
@@ -1427,7 +1426,7 @@ in those bytes modulo $\ell$.
 
 
 
public fun new_scalar_uniform_from_64_bytes(bytes: vector<u8>): Option<Scalar> {
-    if (std::vector::length(&bytes) == 64) {
+    if (bytes.length() == 64) {
         std::option::some(Scalar {
             data: scalar_uniform_from_64_bytes_internal(bytes)
         })
@@ -2424,159 +2423,6 @@ WARNING: This function can only be called with P = RistrettoPoint and S = Scalar
 ## Specification
 
 
-
-
-### Helper functions
-
-
-
-
-
-
-
fun spec_scalar_is_zero(s: Scalar): bool {
-   s.data == x"0000000000000000000000000000000000000000000000000000000000000000"
-}
-
- - - - - - - -
fun spec_scalar_is_one(s: Scalar): bool {
-   s.data == x"0100000000000000000000000000000000000000000000000000000000000000"
-}
-
- - - - - - - -
fun spec_point_is_canonical_internal(bytes: vector<u8>): bool;
-
- - - - - - - -
fun spec_double_scalar_mul_internal(point1: u64, point2: u64, scalar1: vector<u8>, scalar2: vector<u8>): u64;
-
- - - - - - - -
fun spec_multi_scalar_mul_internal<P, S>(points: vector<P>, scalars: vector<S>): u64;
-
- - - - - - - -
fun spec_scalar_is_canonical_internal(s: vector<u8>): bool;
-
- - - - - - - -
fun spec_scalar_from_u64_internal(num: u64): vector<u8>;
-
- - - - - - - -
fun spec_scalar_from_u128_internal(num: u128): vector<u8>;
-
- - - - - - - -
fun spec_scalar_reduced_from_32_bytes_internal(bytes: vector<u8>): vector<u8>;
-
- - - - - - - -
fun spec_scalar_uniform_from_64_bytes_internal(bytes: vector<u8>): vector<u8>;
-
- - - - - - - -
fun spec_scalar_invert_internal(bytes: vector<u8>): vector<u8>;
-
- - - - - - - -
fun spec_scalar_from_sha512_internal(sha2_512_input: vector<u8>): vector<u8>;
-
- - - - - - - -
fun spec_scalar_mul_internal(a_bytes: vector<u8>, b_bytes: vector<u8>): vector<u8>;
-
- - - - - - - -
fun spec_scalar_add_internal(a_bytes: vector<u8>, b_bytes: vector<u8>): vector<u8>;
-
- - - - - - - -
fun spec_scalar_sub_internal(a_bytes: vector<u8>, b_bytes: vector<u8>): vector<u8>;
-
- - - - - - - -
fun spec_scalar_neg_internal(a_bytes: vector<u8>): vector<u8>;
-
- - - ### Function `point_equals` @@ -3020,6 +2866,159 @@ WARNING: This function can only be called with P = RistrettoPoint and S = Scalar + + +### Helper functions + + + + + + +
fun spec_scalar_is_zero(s: Scalar): bool {
+   s.data == x"0000000000000000000000000000000000000000000000000000000000000000"
+}
+
+ + + + + + + +
fun spec_scalar_is_one(s: Scalar): bool {
+   s.data == x"0100000000000000000000000000000000000000000000000000000000000000"
+}
+
+ + + + + + + +
fun spec_point_is_canonical_internal(bytes: vector<u8>): bool;
+
+ + + + + + + +
fun spec_double_scalar_mul_internal(point1: u64, point2: u64, scalar1: vector<u8>, scalar2: vector<u8>): u64;
+
+ + + + + + + +
fun spec_multi_scalar_mul_internal<P, S>(points: vector<P>, scalars: vector<S>): u64;
+
+ + + + + + + +
fun spec_scalar_is_canonical_internal(s: vector<u8>): bool;
+
+ + + + + + + +
fun spec_scalar_from_u64_internal(num: u64): vector<u8>;
+
+ + + + + + + +
fun spec_scalar_from_u128_internal(num: u128): vector<u8>;
+
+ + + + + + + +
fun spec_scalar_reduced_from_32_bytes_internal(bytes: vector<u8>): vector<u8>;
+
+ + + + + + + +
fun spec_scalar_uniform_from_64_bytes_internal(bytes: vector<u8>): vector<u8>;
+
+ + + + + + + +
fun spec_scalar_invert_internal(bytes: vector<u8>): vector<u8>;
+
+ + + + + + + +
fun spec_scalar_from_sha512_internal(sha2_512_input: vector<u8>): vector<u8>;
+
+ + + + + + + +
fun spec_scalar_mul_internal(a_bytes: vector<u8>, b_bytes: vector<u8>): vector<u8>;
+
+ + + + + + + +
fun spec_scalar_add_internal(a_bytes: vector<u8>, b_bytes: vector<u8>): vector<u8>;
+
+ + + + + + + +
fun spec_scalar_sub_internal(a_bytes: vector<u8>, b_bytes: vector<u8>): vector<u8>;
+
+ + + + + + + +
fun spec_scalar_neg_internal(a_bytes: vector<u8>): vector<u8>;
+
+ + + ### Function `new_point_from_sha512_internal` diff --git a/aptos-move/framework/aptos-stdlib/doc/ristretto255_bulletproofs.md b/aptos-move/framework/aptos-stdlib/doc/ristretto255_bulletproofs.md index 22fe4660646e5..3d7f394dfcbcd 100644 --- a/aptos-move/framework/aptos-stdlib/doc/ristretto255_bulletproofs.md +++ b/aptos-move/framework/aptos-stdlib/doc/ristretto255_bulletproofs.md @@ -9,6 +9,10 @@ A Bulletproof-based zero-knowledge range proof is a proof that a Pedersen commit $c = v G + r H$ commits to an $n$-bit value $v$ (i.e., $v \in [0, 2^n)$). Currently, this module only supports $n \in \{8, 16, 32, 64\}$ for the number of bits. +The module also supports batch range proofs, allowing verification of multiple commitments in a single proof. +Each commitment in the batch must satisfy the same range constraint $v \in [0, 2^n)$, and the supported batch +sizes are limited to $\{1, 2, 4, 8, 16\}$. + - [Struct `RangeProof`](#0x1_ristretto255_bulletproofs_RangeProof) - [Constants](#@Constants_0) @@ -17,9 +21,13 @@ $n \in \{8, 16, 32, 64\}$ for the number of bits. - [Function `range_proof_to_bytes`](#0x1_ristretto255_bulletproofs_range_proof_to_bytes) - [Function `verify_range_proof_pedersen`](#0x1_ristretto255_bulletproofs_verify_range_proof_pedersen) - [Function `verify_range_proof`](#0x1_ristretto255_bulletproofs_verify_range_proof) +- [Function `verify_batch_range_proof_pedersen`](#0x1_ristretto255_bulletproofs_verify_batch_range_proof_pedersen) +- [Function `verify_batch_range_proof`](#0x1_ristretto255_bulletproofs_verify_batch_range_proof) - [Function `verify_range_proof_internal`](#0x1_ristretto255_bulletproofs_verify_range_proof_internal) +- [Function `verify_batch_range_proof_internal`](#0x1_ristretto255_bulletproofs_verify_batch_range_proof_internal) - [Specification](#@Specification_1) - [Function `verify_range_proof_internal`](#@Specification_1_verify_range_proof_internal) + - [Function `verify_batch_range_proof_internal`](#@Specification_1_verify_batch_range_proof_internal)
use 0x1::error;
@@ -69,7 +77,17 @@ Represents a zero-knowledge range proof that a value committed inside a Pedersen
 The native functions have not been rolled out yet.
 
 
-
const E_NATIVE_FUN_NOT_AVAILABLE: u64 = 4;
+
const E_NATIVE_FUN_NOT_AVAILABLE: u64 = 7;
+
+ + + + + +The range proof system only supports batch sizes of 1, 2, 4, 8, and 16. + + +
const E_BATCH_SIZE_NOT_SUPPORTED: u64 = 4;
 
@@ -84,6 +102,16 @@ There was an error deserializing the range proof. + + +The domain separation tag exceeded the 256-byte limit. + + +
const E_DST_TOO_LONG: u64 = 6;
+
+ + + The range proof system only supports proving ranges of type $[0, 2^b)$ where $b \in \{8, 16, 32, 64\}$. @@ -104,6 +132,16 @@ The committed value given to the prover is too large. + + +The vector lengths of values and blinding factors do not match. + + +
const E_VECTOR_LENGTHS_MISMATCH: u64 = 5;
+
+ + + The maximum range supported by the Bulletproofs library is $[0, 2^{64})$. @@ -204,6 +242,8 @@ tag (DST). WARNING: The DST check is VERY important for security as it prevents proofs computed for one application (a.k.a., a _domain_) with dst_1 from verifying in a different application with dst_2 != dst_1. +NOTE: currently, domain separation tags of size larger than 256 bytes are not supported. +
public fun verify_range_proof_pedersen(com: &ristretto255_pedersen::Commitment, proof: &ristretto255_bulletproofs::RangeProof, num_bits: u64, dst: vector<u8>): bool
 
@@ -215,12 +255,10 @@ WARNING: The DST check is VERY important for security as it prevents proofs comp
public fun verify_range_proof_pedersen(com: &pedersen::Commitment, proof: &RangeProof, num_bits: u64, dst: vector<u8>): bool {
-    assert!(features::bulletproofs_enabled(), error::invalid_state(E_NATIVE_FUN_NOT_AVAILABLE));
-
-    verify_range_proof_internal(
-        ristretto255::point_to_bytes(&pedersen::commitment_as_compressed_point(com)),
+    verify_range_proof(
+        pedersen::commitment_as_point(com),
         &ristretto255::basepoint(), &ristretto255::hash_to_point_base(),
-        proof.bytes,
+        proof,
         num_bits,
         dst
     )
@@ -236,7 +274,11 @@ WARNING: The DST check is VERY important for security as it prevents proofs comp
 ## Function `verify_range_proof`
 
 Verifies a zero-knowledge range proof that the value v committed in com (as v * val_base + r * rand_base,
-for some randomness r) satisfies v in [0, 2^num_bits). Only works for num_bits in {8, 16, 32, 64}.
+for some randomness r) satisfies v in [0, 2^num_bits).
+
+Only works for num_bits in {8, 16, 32, 64}.
+
+NOTE: currently, domain separation tags of size larger than 256 bytes are not supported.
 
 
 
public fun verify_range_proof(com: &ristretto255::RistrettoPoint, val_base: &ristretto255::RistrettoPoint, rand_base: &ristretto255::RistrettoPoint, proof: &ristretto255_bulletproofs::RangeProof, num_bits: u64, dst: vector<u8>): bool
@@ -254,6 +296,7 @@ for some randomness r) satisfies v in [0, 2^num_
     proof: &RangeProof, num_bits: u64, dst: vector<u8>): bool
 {
     assert!(features::bulletproofs_enabled(), error::invalid_state(E_NATIVE_FUN_NOT_AVAILABLE));
+    assert!(dst.length() <= 256, error::invalid_argument(E_DST_TOO_LONG));
 
     verify_range_proof_internal(
         ristretto255::point_to_bytes(&ristretto255::point_compress(com)),
@@ -265,6 +308,87 @@ for some randomness r) satisfies v in [0, 2^num_
 
 
 
+
+ + + +## Function `verify_batch_range_proof_pedersen` + +Verifies a zero-knowledge range proof for a batch of Pedersen commitments comms +(under the default Bulletproofs commitment key; see pedersen::new_commitment_for_bulletproof), +ensuring that all values v satisfy v in [0, 2^num_bits). +Only works for num_bits in {8, 16, 32, 64} and batch size (length of comms) in {1, 2, 4, 8, 16}. + +NOTE: currently, domain separation tags of size larger than 256 bytes are not supported. + + +
public fun verify_batch_range_proof_pedersen(comms: &vector<ristretto255_pedersen::Commitment>, proof: &ristretto255_bulletproofs::RangeProof, num_bits: u64, dst: vector<u8>): bool
+
+ + + +
+Implementation + + +
public fun verify_batch_range_proof_pedersen(
+    comms: &vector<pedersen::Commitment>, proof: &RangeProof,
+    num_bits: u64, dst: vector<u8>): bool
+{
+    verify_batch_range_proof(
+        &comms.map_ref(|com| ristretto255::point_clone(pedersen::commitment_as_point(com))),
+        &ristretto255::basepoint(), &ristretto255::hash_to_point_base(),
+        proof,
+        num_bits,
+        dst
+    )
+}
+
+ + + +
+ + + +## Function `verify_batch_range_proof` + +v * val_base + r * rand_base), ensuring that all values v satisfy +v in [0, 2^num_bits). Only works for num_bits in {8, 16, 32, 64} and batch size +(length of the comms) in {1, 2, 4, 8, 16}. + +NOTE: currently, domain separation tags of size larger than 256 bytes are not supported. + + +
public fun verify_batch_range_proof(comms: &vector<ristretto255::RistrettoPoint>, val_base: &ristretto255::RistrettoPoint, rand_base: &ristretto255::RistrettoPoint, proof: &ristretto255_bulletproofs::RangeProof, num_bits: u64, dst: vector<u8>): bool
+
+ + + +
+Implementation + + +
public fun verify_batch_range_proof(
+    comms: &vector<RistrettoPoint>,
+    val_base: &RistrettoPoint, rand_base: &RistrettoPoint,
+    proof: &RangeProof, num_bits: u64, dst: vector<u8>): bool
+{
+    assert!(features::bulletproofs_batch_enabled(), error::invalid_state(E_NATIVE_FUN_NOT_AVAILABLE));
+    assert!(dst.length() <= 256, error::invalid_argument(E_DST_TOO_LONG));
+
+    let comms = comms.map_ref(|com| ristretto255::point_to_bytes(&ristretto255::point_compress(com)));
+
+    verify_batch_range_proof_internal(
+        comms,
+        val_base, rand_base,
+        proof.bytes, num_bits, dst
+    )
+}
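
A hedged usage sketch for the verifiers above: the commitment and proof bytes would come from an off-chain Bulletproofs prover, and the module address, DST, and 64-bit range are illustrative assumptions (range_proof_from_bytes is assumed to be the proof deserializer paired with range_proof_to_bytes):

```move
#[test_only]
module 0xcafe::bulletproofs_example {
    use aptos_std::ristretto255_bulletproofs as bulletproofs;
    use aptos_std::ristretto255_pedersen as pedersen;

    /// Domain separation tag; must be at most 256 bytes.
    const MY_DST: vector<u8> = b"my-app-v1|range-proof";

    /// Checks that a single Pedersen commitment hides a value in [0, 2^64).
    public fun check_one(comm_bytes: vector<u8>, proof_bytes: vector<u8>): bool {
        // Aborts if comm_bytes is not a valid compressed Ristretto point.
        let comm = pedersen::new_commitment_from_bytes(comm_bytes).extract();
        let proof = bulletproofs::range_proof_from_bytes(proof_bytes);
        bulletproofs::verify_range_proof_pedersen(&comm, &proof, 64, MY_DST)
    }

    /// Checks a batch of commitments; the batch size must be 1, 2, 4, 8 or 16.
    public fun check_batch(comms: &vector<pedersen::Commitment>, proof_bytes: vector<u8>): bool {
        let proof = bulletproofs::range_proof_from_bytes(proof_bytes);
        bulletproofs::verify_batch_range_proof_pedersen(comms, &proof, 64, MY_DST)
    }
}
```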
+
+ + +
@@ -296,6 +420,39 @@ Aborts with + +## Function `verify_batch_range_proof_internal` + +Aborts with error::invalid_argument(E_DESERIALIZE_RANGE_PROOF) if proof is not a valid serialization of a +range proof. +Aborts with error::invalid_argument(E_RANGE_NOT_SUPPORTED) if an unsupported num_bits is provided. +Aborts with error::invalid_argument(E_BATCH_SIZE_NOT_SUPPORTED) if an unsupported batch size is provided. +Aborts with error::invalid_argument(E_VECTOR_LENGTHS_MISMATCH) if the vector lengths of comms and proof do not match. + + +
fun verify_batch_range_proof_internal(comms: vector<vector<u8>>, val_base: &ristretto255::RistrettoPoint, rand_base: &ristretto255::RistrettoPoint, proof: vector<u8>, num_bits: u64, dst: vector<u8>): bool
+
+ + + +
+Implementation + + +
native fun verify_batch_range_proof_internal(
+    comms: vector<vector<u8>>,
+    val_base: &RistrettoPoint,
+    rand_base: &RistrettoPoint,
+    proof: vector<u8>,
+    num_bits: u64,
+    dst: vector<u8>): bool;
+
+ + +
@@ -314,6 +471,22 @@ Aborts with + +### Function `verify_batch_range_proof_internal` + + +
fun verify_batch_range_proof_internal(comms: vector<vector<u8>>, val_base: &ristretto255::RistrettoPoint, rand_base: &ristretto255::RistrettoPoint, proof: vector<u8>, num_bits: u64, dst: vector<u8>): bool
+
+ + + +
pragma opaque;
 
diff --git a/aptos-move/framework/aptos-stdlib/doc/ristretto255_elgamal.md b/aptos-move/framework/aptos-stdlib/doc/ristretto255_elgamal.md index 30f2644660598..e284e9d697a90 100644 --- a/aptos-move/framework/aptos-stdlib/doc/ristretto255_elgamal.md +++ b/aptos-move/framework/aptos-stdlib/doc/ristretto255_elgamal.md @@ -160,9 +160,9 @@ Creates a new public key from a serialized Ristretto255 point.
public fun new_pubkey_from_bytes(bytes: vector<u8>): Option<CompressedPubkey> {
     let point = ristretto255::new_compressed_point_from_bytes(bytes);
-    if (std::option::is_some(&mut point)) {
+    if (point.is_some()) {
         let pk = CompressedPubkey {
-            point: std::option::extract(&mut point)
+            point: point.extract()
         };
         std::option::some(pk)
     } else {
@@ -268,19 +268,19 @@ next 32 bytes store v * G + r * Y, where Y is the publ
 
 
 
public fun new_ciphertext_from_bytes(bytes: vector<u8>): Option<Ciphertext> {
-    if(vector::length(&bytes) != 64) {
+    if(bytes.length() != 64) {
         return std::option::none<Ciphertext>()
     };
 
-    let bytes_right = vector::trim(&mut bytes, 32);
+    let bytes_right = bytes.trim(32);
 
     let left_point = ristretto255::new_point_from_bytes(bytes);
     let right_point = ristretto255::new_point_from_bytes(bytes_right);
 
-    if (std::option::is_some<RistrettoPoint>(&mut left_point) && std::option::is_some<RistrettoPoint>(&mut right_point)) {
+    if (left_point.is_some::<RistrettoPoint>() && right_point.is_some::<RistrettoPoint>()) {
         std::option::some<Ciphertext>(Ciphertext {
-            left: std::option::extract<RistrettoPoint>(&mut left_point),
-            right: std::option::extract<RistrettoPoint>(&mut right_point)
+            left: left_point.extract::<RistrettoPoint>(),
+            right: right_point.extract::<RistrettoPoint>()
         })
     } else {
         std::option::none<Ciphertext>()
@@ -397,8 +397,8 @@ Given a ciphertext ct, serializes that ciphertext into bytes.
     let bytes_left = ristretto255::point_to_bytes(&ristretto255::point_compress(&ct.left));
     let bytes_right = ristretto255::point_to_bytes(&ristretto255::point_compress(&ct.right));
     let bytes = vector::empty<u8>();
-    vector::append<u8>(&mut bytes, bytes_left);
-    vector::append<u8>(&mut bytes, bytes_right);
+    bytes.append::<u8>(bytes_left);
+    bytes.append::<u8>(bytes_right);
     bytes
 }
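
One detail worth calling out in the deserializer above: bytes.trim(32) splits the 64-byte input in place, returning the trailing 32 bytes (the v * G + r * Y half) while bytes itself keeps the leading 32 bytes (the r * G half); each half is then parsed as a compressed Ristretto point, and the serializer shown here simply appends the two 32-byte encodings back in the same order.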
 
diff --git a/aptos-move/framework/aptos-stdlib/doc/ristretto255_pedersen.md b/aptos-move/framework/aptos-stdlib/doc/ristretto255_pedersen.md index 0c870c99dc80e..7dc21cc010595 100644 --- a/aptos-move/framework/aptos-stdlib/doc/ristretto255_pedersen.md +++ b/aptos-move/framework/aptos-stdlib/doc/ristretto255_pedersen.md @@ -99,9 +99,9 @@ Creates a new public key from a serialized Ristretto255 point.
public fun new_commitment_from_bytes(bytes: vector<u8>): Option<Commitment> {
     let point = ristretto255::new_point_from_bytes(bytes);
-    if (std::option::is_some(&mut point)) {
+    if (point.is_some()) {
         let comm = Commitment {
-            point: std::option::extract(&mut point)
+            point: point.extract()
         };
         std::option::some(comm)
     } else {
@@ -266,7 +266,7 @@ base used in the Bulletproofs library (i.e., new_commitment_for_bulletproof(v: &Scalar, r: &Scalar): Commitment {
     let rand_base = ristretto255::new_point_from_bytes(BULLETPROOF_DEFAULT_PEDERSEN_RAND_BASE);
-    let rand_base = std::option::extract(&mut rand_base);
+    let rand_base = rand_base.extract();
 
     Commitment {
         point: ristretto255::basepoint_double_mul(r, &rand_base, v)
@@ -561,7 +561,7 @@ Bulletproofs Move module.
 
 
 
public fun randomness_base_for_bulletproof(): RistrettoPoint {
-    std::option::extract(&mut ristretto255::new_point_from_bytes(BULLETPROOF_DEFAULT_PEDERSEN_RAND_BASE))
+    ristretto255::new_point_from_bytes(BULLETPROOF_DEFAULT_PEDERSEN_RAND_BASE).extract()
 }
 
diff --git a/aptos-move/framework/aptos-stdlib/doc/secp2561r.md b/aptos-move/framework/aptos-stdlib/doc/secp2561r.md new file mode 100644 index 0000000000000..c86cd42bb2628 --- /dev/null +++ b/aptos-move/framework/aptos-stdlib/doc/secp2561r.md @@ -0,0 +1,135 @@ + + + +# Module `0x1::secp256r1` + +This module implements ECDSA signatures based on the prime-order secp256r1 ellptic curve (i.e., cofactor is 1). + + +- [Struct `ECDSARawPublicKey`](#0x1_secp256r1_ECDSARawPublicKey) +- [Constants](#@Constants_0) +- [Function `ecdsa_raw_public_key_from_64_bytes`](#0x1_secp256r1_ecdsa_raw_public_key_from_64_bytes) +- [Function `ecdsa_raw_public_key_to_bytes`](#0x1_secp256r1_ecdsa_raw_public_key_to_bytes) +- [Specification](#@Specification_1) + + +
use 0x1::error;
+
+ + + + + +## Struct `ECDSARawPublicKey` + +A 64-byte ECDSA public key. + + +
struct ECDSARawPublicKey has copy, drop, store
+
+ + + +
+Fields + + +
+
+bytes: vector<u8> +
+
+ +
+
+ + +
+ + + +## Constants + + + + +An error occurred while deserializing, for example due to wrong input size. + + +
const E_DESERIALIZE: u64 = 1;
+
+ + + + + +The size of a secp256k1-based ECDSA public key, in bytes. + + +
const RAW_PUBLIC_KEY_NUM_BYTES: u64 = 64;
+
+ + + + + +## Function `ecdsa_raw_public_key_from_64_bytes` + +Constructs an ECDSARawPublicKey struct, given a 64-byte raw representation. + + +
public fun ecdsa_raw_public_key_from_64_bytes(bytes: vector<u8>): secp256r1::ECDSARawPublicKey
+
+ + + +
+Implementation + + +
public fun ecdsa_raw_public_key_from_64_bytes(bytes: vector<u8>): ECDSARawPublicKey {
+    assert!(bytes.length() == RAW_PUBLIC_KEY_NUM_BYTES, std::error::invalid_argument(E_DESERIALIZE));
+    ECDSARawPublicKey { bytes }
+}
+
+ + + +
+ + + +## Function `ecdsa_raw_public_key_to_bytes` + +Serializes an ECDSARawPublicKey struct to 64-bytes. + + +
public fun ecdsa_raw_public_key_to_bytes(pk: &secp256r1::ECDSARawPublicKey): vector<u8>
+
+ + + +
+Implementation + + +
public fun ecdsa_raw_public_key_to_bytes(pk: &ECDSARawPublicKey): vector<u8> {
+    pk.bytes
+}
+
+ + + +
+ + + +## Specification + + + +
pragma verify = false;
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/aptos-stdlib/doc/secp256k1.md b/aptos-move/framework/aptos-stdlib/doc/secp256k1.md index 88863d0caf223..e2269693c40fc 100644 --- a/aptos-move/framework/aptos-stdlib/doc/secp256k1.md +++ b/aptos-move/framework/aptos-stdlib/doc/secp256k1.md @@ -121,6 +121,16 @@ The size of a secp256k1-based ECDSA public key, in bytes. + + +Recovery ID needs to be either 0, 1, 2 or 3. If you are recovering from an (r, s, v) Ethereum signature, take its v value and, set the recovery_id as follows: if v == 27, set to 0, if v == 28, set to 1, if v == 37, set to 0, if v == 38, set to 1. + + +
const E_BAD_RECOVERY_ID: u64 = 2;
+
+ + + ## Function `ecdsa_signature_from_bytes` @@ -138,7 +148,7 @@ Constructs an ECDSASignature struct from the given 64 bytes.
public fun ecdsa_signature_from_bytes(bytes: vector<u8>): ECDSASignature {
-    assert!(std::vector::length(&bytes) == SIGNATURE_NUM_BYTES, std::error::invalid_argument(E_DESERIALIZE));
+    assert!(bytes.length() == SIGNATURE_NUM_BYTES, std::error::invalid_argument(E_DESERIALIZE));
     ECDSASignature { bytes }
 }
 
@@ -164,7 +174,7 @@ Constructs an ECDSARawPublicKey struct, given a 64-byte raw representation.
public fun ecdsa_raw_public_key_from_64_bytes(bytes: vector<u8>): ECDSARawPublicKey {
-    assert!(std::vector::length(&bytes) == RAW_PUBLIC_KEY_NUM_BYTES, std::error::invalid_argument(E_DESERIALIZE));
+    assert!(bytes.length() == RAW_PUBLIC_KEY_NUM_BYTES, std::error::invalid_argument(E_DESERIALIZE));
     ECDSARawPublicKey { bytes }
 }
 
@@ -227,12 +237,24 @@ Serializes an ECDSASignature struct to 64-bytes. ## Function `ecdsa_recover` -Recovers the signer's raw (64-byte) public key from a secp256k1 ECDSA signature given the recovery_id and the signed -message (32 byte digest). +Recovers the signer's raw (64-byte) public key from a secp256k1 ECDSA signature given the (2-bit) recovery_id +and the signed message (32 byte digest). + +This recovery algorithm can only be used to check validity of a signature if the signer's public key (or its +hash) is known beforehand. When the algorithm returns a public key pk, this means that the signature in +signature verified on message under that pk. But, again, that is only meaningful if pk is the "right" +one (e.g., in Ethereum, the "right" pk is the one whose hash matches the account's address). + +If you do not understand this nuance, please learn more about ECDSA and pubkey recovery (see +https://alinush.github.io/ecdsa#pubkey-recovery), or you risk writing completely-insecure code. -Note that an invalid signature, or a signature from a different message, will result in the recovery of an -incorrect public key. This recovery algorithm can only be used to check validity of a signature if the signer's -public key (or its hash) is known beforehand. +Note: This function does not apply any additional hashing on the message; it simply passes in the message as +raw bytes to the ECDSA recovery function. (The max allowed size ~32 bytes.) ++ Nonetheless, most applications will first hash the message to be signed. So, typically, message here tends +to be a hash rather than an actual message. Therefore, the developer should be aware of what hash function +was used for this. ++ In particular, if using this function to verify an Ethereum signature, you will likely have to input +a keccak256 hash of the message as the message parameter.
public fun ecdsa_recover(message: vector<u8>, recovery_id: u8, signature: &secp256k1::ECDSASignature): option::Option<secp256k1::ECDSARawPublicKey>
@@ -249,7 +271,14 @@ public key (or its hash) is known beforehand.
     recovery_id: u8,
     signature: &ECDSASignature,
 ): Option<ECDSARawPublicKey> {
+
+    // If recovery ID is not 0 or 1 or 2 or 3, help the caller out by aborting with `E_BAD_RECOVERY_ID`
+    if(recovery_id != 0 && recovery_id != 1 && recovery_id != 2 && recovery_id != 3) {
+        abort std::error::invalid_argument(E_BAD_RECOVERY_ID);
+    };
+
     let (pk, success) = ecdsa_recover_internal(message, recovery_id, signature.bytes);
+
     if (success) {
         std::option::some(ecdsa_raw_public_key_from_64_bytes(pk))
     } else {
@@ -374,7 +403,8 @@ and returns ([], false) otherwise.
 
 
 
-
aborts_if ecdsa_recover_internal_abort_condition(message, recovery_id, signature.bytes);
+
aborts_if recovery_id > 3;
+aborts_if ecdsa_recover_internal_abort_condition(message, recovery_id, signature.bytes);
 let pk = spec_ecdsa_recover_internal_result_1(message, recovery_id, signature.bytes);
 let success = spec_ecdsa_recover_internal_result_2(message, recovery_id, signature.bytes);
 ensures success ==> result == std::option::spec_some(ecdsa_raw_public_key_from_64_bytes(pk));
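
A minimal usage sketch of the recovery pattern described above, assuming a caller that already knows the expected 64-byte raw public key; the `0x42` address, module name, and parameter names are illustrative and not part of this change:

```move
#[test_only]
module 0x42::recover_example {
    use std::option;
    use aptos_std::secp256k1;

    // Returns true only if `sig_bytes` over the 32-byte digest `message` recovers
    // exactly the trusted 64-byte raw public key `expected_pk_bytes`.
    fun signature_matches_known_key(
        message: vector<u8>,
        recovery_id: u8,
        sig_bytes: vector<u8>,
        expected_pk_bytes: vector<u8>,
    ): bool {
        let sig = secp256k1::ecdsa_signature_from_bytes(sig_bytes);
        let maybe_pk = secp256k1::ecdsa_recover(message, recovery_id, &sig);
        if (option::is_none(&maybe_pk)) {
            return false
        };
        let pk = option::extract(&mut maybe_pk);
        // Comparing against a key we already trust is what turns recovery into a
        // signature check; the recovered key alone proves nothing.
        secp256k1::ecdsa_raw_public_key_to_bytes(&pk) == expected_pk_bytes
    }
}
```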
diff --git a/aptos-move/framework/aptos-stdlib/doc/secp256r1.md b/aptos-move/framework/aptos-stdlib/doc/secp256r1.md
new file mode 100644
index 0000000000000..ff5e6d336be51
--- /dev/null
+++ b/aptos-move/framework/aptos-stdlib/doc/secp256r1.md
@@ -0,0 +1,135 @@
+
+
+
+# Module `0x1::secp256r1`
+
+This module implements ECDSA signatures based on the prime-order secp256r1 elliptic curve (i.e., cofactor is 1).
+
+
+-  [Struct `ECDSARawPublicKey`](#0x1_secp256r1_ECDSARawPublicKey)
+-  [Constants](#@Constants_0)
+-  [Function `ecdsa_raw_public_key_from_64_bytes`](#0x1_secp256r1_ecdsa_raw_public_key_from_64_bytes)
+-  [Function `ecdsa_raw_public_key_to_bytes`](#0x1_secp256r1_ecdsa_raw_public_key_to_bytes)
+-  [Specification](#@Specification_1)
+
+
+
use 0x1::error;
+
+ + + + + +## Struct `ECDSARawPublicKey` + +A 64-byte ECDSA public key. + + +
struct ECDSARawPublicKey has copy, drop, store
+
+ + + +
+Fields + + +
+
+bytes: vector<u8> +
+
+ +
+
+ + +
+ + + +## Constants + + + + +An error occurred while deserializing, for example due to wrong input size. + + +
const E_DESERIALIZE: u64 = 1;
+
+
+
+The size of a secp256r1-based ECDSA public key, in bytes.
+
+
const RAW_PUBLIC_KEY_NUM_BYTES: u64 = 64;
+
+ + + + + +## Function `ecdsa_raw_public_key_from_64_bytes` + +Constructs an ECDSARawPublicKey struct, given a 64-byte raw representation. + + +
public fun ecdsa_raw_public_key_from_64_bytes(bytes: vector<u8>): secp256r1::ECDSARawPublicKey
+
+ + + +
+Implementation + + +
public fun ecdsa_raw_public_key_from_64_bytes(bytes: vector<u8>): ECDSARawPublicKey {
+    assert!(bytes.length() == RAW_PUBLIC_KEY_NUM_BYTES, std::error::invalid_argument(E_DESERIALIZE));
+    ECDSARawPublicKey { bytes }
+}
+
+ + + +
+ + + +## Function `ecdsa_raw_public_key_to_bytes` + +Serializes an ECDSARawPublicKey struct to 64-bytes. + + +
public fun ecdsa_raw_public_key_to_bytes(pk: &secp256r1::ECDSARawPublicKey): vector<u8>
+
+ + + +
+Implementation + + +
public fun ecdsa_raw_public_key_to_bytes(pk: &ECDSARawPublicKey): vector<u8> {
+    pk.bytes
+}
+
+ + + +
+ + + +## Specification + + + +
pragma verify = false;
+
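
A small sketch of the intended round-trip through the raw-key wrapper above, assuming an illustrative `0x42` test module and placeholder all-zero bytes (only the 64-byte length is checked; no curve validation happens here):

```move
#[test_only]
module 0x42::secp256r1_example {
    use std::vector;
    use aptos_std::secp256r1;

    #[test]
    fun raw_pk_round_trip() {
        // Build a placeholder 64-byte value.
        let bytes = vector::empty<u8>();
        while (vector::length(&bytes) < 64) {
            vector::push_back(&mut bytes, 0u8);
        };
        let pk = secp256r1::ecdsa_raw_public_key_from_64_bytes(copy bytes);
        assert!(secp256r1::ecdsa_raw_public_key_to_bytes(&pk) == bytes, 0);
    }
}
```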
+
+
+[move-book]: https://aptos.dev/move/book/SUMMARY
diff --git a/aptos-move/framework/aptos-stdlib/doc/simple_map.md b/aptos-move/framework/aptos-stdlib/doc/simple_map.md
index ea5040273580b..931143f9ebdc8 100644
--- a/aptos-move/framework/aptos-stdlib/doc/simple_map.md
+++ b/aptos-move/framework/aptos-stdlib/doc/simple_map.md
@@ -10,6 +10,9 @@ This module provides a solution for unsorted maps, that is it has the properties
 4) The keys are unsorted.
 5) Adds and removals take O(N) time
 
+DEPRECATED: since its implementation is inefficient, it
+has been deprecated in favor of ordered_map.move.
+
 
 - [Struct `SimpleMap`](#0x1_simple_map_SimpleMap)
 - [Struct `Element`](#0x1_simple_map_Element)
@@ -62,6 +65,8 @@ This module provides a solution for unsorted maps, that is it has the properties
 ## Struct `SimpleMap`
 
+DEPRECATED: since its implementation is inefficient, it
+has been deprecated in favor of ordered_map.move.
struct SimpleMap<Key, Value> has copy, drop, store
@@ -149,7 +154,7 @@ Map key is not found
 
 
 
-
public fun length<Key: store, Value: store>(map: &simple_map::SimpleMap<Key, Value>): u64
+
public fun length<Key: store, Value: store>(self: &simple_map::SimpleMap<Key, Value>): u64
 
@@ -158,8 +163,8 @@ Map key is not found Implementation -
public fun length<Key: store, Value: store>(map: &SimpleMap<Key, Value>): u64 {
-    vector::length(&map.data)
+
public fun length<Key: store, Value: store>(self: &SimpleMap<Key, Value>): u64 {
+    self.data.length()
 }
 
@@ -215,7 +220,7 @@ Create a SimpleMap from a vector of keys and values. The keys must be unique. values: vector<Value>, ): SimpleMap<Key, Value> { let map = new(); - add_all(&mut map, keys, values); + map.add_all(keys, values); map }
@@ -257,7 +262,7 @@ This function is deprecated, use new instead. -
public fun borrow<Key: store, Value: store>(map: &simple_map::SimpleMap<Key, Value>, key: &Key): &Value
+
public fun borrow<Key: store, Value: store>(self: &simple_map::SimpleMap<Key, Value>, key: &Key): &Value
 
@@ -267,13 +272,13 @@ This function is deprecated, use new instead.
public fun borrow<Key: store, Value: store>(
-    map: &SimpleMap<Key, Value>,
+    self: &SimpleMap<Key, Value>,
     key: &Key,
 ): &Value {
-    let maybe_idx = find(map, key);
-    assert!(option::is_some(&maybe_idx), error::invalid_argument(EKEY_NOT_FOUND));
-    let idx = option::extract(&mut maybe_idx);
-    &vector::borrow(&map.data, idx).value
+    let maybe_idx = self.find(key);
+    assert!(maybe_idx.is_some(), error::invalid_argument(EKEY_NOT_FOUND));
+    let idx = maybe_idx.extract();
+    &self.data.borrow(idx).value
 }
 
@@ -287,7 +292,7 @@ This function is deprecated, use new instead. -
public fun borrow_mut<Key: store, Value: store>(map: &mut simple_map::SimpleMap<Key, Value>, key: &Key): &mut Value
+
public fun borrow_mut<Key: store, Value: store>(self: &mut simple_map::SimpleMap<Key, Value>, key: &Key): &mut Value
 
@@ -297,13 +302,13 @@ This function is deprecated, use new instead.
public fun borrow_mut<Key: store, Value: store>(
-    map: &mut SimpleMap<Key, Value>,
+    self: &mut SimpleMap<Key, Value>,
     key: &Key,
 ): &mut Value {
-    let maybe_idx = find(map, key);
-    assert!(option::is_some(&maybe_idx), error::invalid_argument(EKEY_NOT_FOUND));
-    let idx = option::extract(&mut maybe_idx);
-    &mut vector::borrow_mut(&mut map.data, idx).value
+    let maybe_idx = self.find(key);
+    assert!(maybe_idx.is_some(), error::invalid_argument(EKEY_NOT_FOUND));
+    let idx = maybe_idx.extract();
+    &mut self.data.borrow_mut(idx).value
 }
 
@@ -317,7 +322,7 @@ This function is deprecated, use new instead. -
public fun contains_key<Key: store, Value: store>(map: &simple_map::SimpleMap<Key, Value>, key: &Key): bool
+
public fun contains_key<Key: store, Value: store>(self: &simple_map::SimpleMap<Key, Value>, key: &Key): bool
 
@@ -327,11 +332,11 @@ This function is deprecated, use new instead.
public fun contains_key<Key: store, Value: store>(
-    map: &SimpleMap<Key, Value>,
+    self: &SimpleMap<Key, Value>,
     key: &Key,
 ): bool {
-    let maybe_idx = find(map, key);
-    option::is_some(&maybe_idx)
+    let maybe_idx = self.find(key);
+    maybe_idx.is_some()
 }
 
@@ -345,7 +350,7 @@ This function is deprecated, use new instead. -
public fun destroy_empty<Key: store, Value: store>(map: simple_map::SimpleMap<Key, Value>)
+
public fun destroy_empty<Key: store, Value: store>(self: simple_map::SimpleMap<Key, Value>)
 
@@ -354,9 +359,9 @@ This function is deprecated, use new instead. Implementation -
public fun destroy_empty<Key: store, Value: store>(map: SimpleMap<Key, Value>) {
-    let SimpleMap { data } = map;
-    vector::destroy_empty(data);
+
public fun destroy_empty<Key: store, Value: store>(self: SimpleMap<Key, Value>) {
+    let SimpleMap { data } = self;
+    data.destroy_empty();
 }
 
@@ -371,7 +376,7 @@ This function is deprecated, use new instead. Add a key/value pair to the map. The key must not already exist. -
public fun add<Key: store, Value: store>(map: &mut simple_map::SimpleMap<Key, Value>, key: Key, value: Value)
+
public fun add<Key: store, Value: store>(self: &mut simple_map::SimpleMap<Key, Value>, key: Key, value: Value)
 
@@ -381,14 +386,14 @@ Add a key/value pair to the map. The key must not already exist.
public fun add<Key: store, Value: store>(
-    map: &mut SimpleMap<Key, Value>,
+    self: &mut SimpleMap<Key, Value>,
     key: Key,
     value: Value,
 ) {
-    let maybe_idx = find(map, &key);
-    assert!(option::is_none(&maybe_idx), error::invalid_argument(EKEY_ALREADY_EXISTS));
+    let maybe_idx = self.find(&key);
+    assert!(maybe_idx.is_none(), error::invalid_argument(EKEY_ALREADY_EXISTS));
 
-    vector::push_back(&mut map.data, Element { key, value });
+    self.data.push_back(Element { key, value });
 }
 
@@ -403,7 +408,7 @@ Add a key/value pair to the map. The key must not already exist. Add multiple key/value pairs to the map. The keys must not already exist. -
public fun add_all<Key: store, Value: store>(map: &mut simple_map::SimpleMap<Key, Value>, keys: vector<Key>, values: vector<Value>)
+
public fun add_all<Key: store, Value: store>(self: &mut simple_map::SimpleMap<Key, Value>, keys: vector<Key>, values: vector<Value>)
 
@@ -413,12 +418,12 @@ Add multiple key/value pairs to the map. The keys must not already exist.
public fun add_all<Key: store, Value: store>(
-    map: &mut SimpleMap<Key, Value>,
+    self: &mut SimpleMap<Key, Value>,
     keys: vector<Key>,
     values: vector<Value>,
 ) {
-    vector::zip(keys, values, |key, value| {
-        add(map, key, value);
+    keys.zip(values, |key, value| {
+        self.add(key, value);
     });
 }
 
@@ -434,7 +439,7 @@ Add multiple key/value pairs to the map. The keys must not already exist. Insert key/value pair or update an existing key to a new value -
public fun upsert<Key: store, Value: store>(map: &mut simple_map::SimpleMap<Key, Value>, key: Key, value: Value): (option::Option<Key>, option::Option<Value>)
+
public fun upsert<Key: store, Value: store>(self: &mut simple_map::SimpleMap<Key, Value>, key: Key, value: Value): (option::Option<Key>, option::Option<Value>)
 
@@ -444,24 +449,22 @@ Insert key/value pair or update an existing key to a new value
public fun upsert<Key: store, Value: store>(
-    map: &mut SimpleMap<Key, Value>,
+    self: &mut SimpleMap<Key, Value>,
     key: Key,
     value: Value
 ): (std::option::Option<Key>, std::option::Option<Value>) {
-    let data = &mut map.data;
-    let len = vector::length(data);
-    let i = 0;
-    while (i < len) {
-        let element = vector::borrow(data, i);
+    let data = &mut self.data;
+    let len = data.length();
+    for (i in 0..len) {
+        let element = data.borrow(i);
         if (&element.key == &key) {
-            vector::push_back(data, Element { key, value });
-            vector::swap(data, i, len);
-            let Element { key, value } = vector::pop_back(data);
+            data.push_back(Element { key, value });
+            data.swap(i, len);
+            let Element { key, value } = data.pop_back();
             return (std::option::some(key), std::option::some(value))
         };
-        i = i + 1;
     };
-    vector::push_back(&mut map.data, Element { key, value });
+    self.data.push_back(Element { key, value });
     (std::option::none(), std::option::none())
 }
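
The switch to `self` receivers above also changes how callers typically invoke the module; a hypothetical sketch of the resulting call style (the `0x42` address and the keys/values are illustrative only):

```move
#[test_only]
module 0x42::simple_map_example {
    use aptos_std::simple_map;

    // Exercises the receiver-style calls (map.add, map.upsert, map.remove) that this
    // change switches the module to internally.
    #[test]
    fun receiver_style_usage() {
        let map = simple_map::new<u64, u64>();
        map.add(1, 10);
        // Upserting an existing key returns the previous key and value.
        let (old_key, old_value) = map.upsert(1, 11);
        assert!(old_key.is_some() && old_value.is_some(), 0);
        assert!(*map.borrow(&1) == 11, 1);
        let (_k, _v) = map.remove(&1);
        map.destroy_empty();
    }
}
```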
 
@@ -477,7 +480,7 @@ Insert key/value pair or update an existing key to a new value Return all keys in the map. This requires keys to be copyable. -
public fun keys<Key: copy, Value>(map: &simple_map::SimpleMap<Key, Value>): vector<Key>
+
public fun keys<Key: copy, Value>(self: &simple_map::SimpleMap<Key, Value>): vector<Key>
 
@@ -486,9 +489,8 @@ Return all keys in the map. This requires keys to be copyable. Implementation -
public fun keys<Key: copy, Value>(map: &SimpleMap<Key, Value>): vector<Key> {
-    vector::map_ref(&map.data, |e| {
-        let e: &Element<Key, Value> = e;
+
public fun keys<Key: copy, Value>(self: &SimpleMap<Key, Value>): vector<Key> {
+    self.data.map_ref(|e| {
         e.key
     })
 }
@@ -505,7 +507,7 @@ Return all keys in the map. This requires keys to be copyable.
 Return all values in the map. This requires values to be copyable.
 
 
-
public fun values<Key, Value: copy>(map: &simple_map::SimpleMap<Key, Value>): vector<Value>
+
public fun values<Key, Value: copy>(self: &simple_map::SimpleMap<Key, Value>): vector<Value>
 
@@ -514,9 +516,8 @@ Return all values in the map. This requires values to be copyable. Implementation -
public fun values<Key, Value: copy>(map: &SimpleMap<Key, Value>): vector<Value> {
-    vector::map_ref(&map.data, |e| {
-        let e: &Element<Key, Value> = e;
+
public fun values<Key, Value: copy>(self: &SimpleMap<Key, Value>): vector<Value> {
+    self.data.map_ref(|e| {
         e.value
     })
 }
@@ -534,7 +535,7 @@ Transform the map into two vectors with the keys and values respectively
 Primarily used to destroy a map
 
 
-
public fun to_vec_pair<Key: store, Value: store>(map: simple_map::SimpleMap<Key, Value>): (vector<Key>, vector<Value>)
+
public fun to_vec_pair<Key: store, Value: store>(self: simple_map::SimpleMap<Key, Value>): (vector<Key>, vector<Value>)
 
@@ -544,14 +545,14 @@ Primarily used to destroy a map
public fun to_vec_pair<Key: store, Value: store>(
-    map: SimpleMap<Key, Value>): (vector<Key>, vector<Value>) {
+    self: SimpleMap<Key, Value>): (vector<Key>, vector<Value>) {
     let keys: vector<Key> = vector::empty();
     let values: vector<Value> = vector::empty();
-    let SimpleMap { data } = map;
-    vector::for_each(data, |e| {
+    let SimpleMap { data } = self;
+    data.for_each(|e| {
         let Element { key, value } = e;
-        vector::push_back(&mut keys, key);
-        vector::push_back(&mut values, value);
+        keys.push_back(key);
+        values.push_back(value);
     });
     (keys, values)
 }
@@ -569,7 +570,7 @@ For maps that cannot be dropped this is a utility to destroy them
 using lambdas to destroy the individual keys and values.
 
 
-
public fun destroy<Key: store, Value: store>(map: simple_map::SimpleMap<Key, Value>, dk: |Key|, dv: |Value|)
+
public fun destroy<Key: store, Value: store>(self: simple_map::SimpleMap<Key, Value>, dk: |Key|, dv: |Value|)
 
@@ -579,13 +580,13 @@ using lambdas to destroy the individual keys and values.
public inline fun destroy<Key: store, Value: store>(
-    map: SimpleMap<Key, Value>,
+    self: SimpleMap<Key, Value>,
     dk: |Key|,
     dv: |Value|
 ) {
-    let (keys, values) = to_vec_pair(map);
-    vector::destroy(keys, |_k| dk(_k));
-    vector::destroy(values, |_v| dv(_v));
+    let (keys, values) = self.to_vec_pair();
+    keys.destroy(|_k| dk(_k));
+    values.destroy(|_v| dv(_v));
 }
 
@@ -600,7 +601,7 @@ using lambdas to destroy the individual keys and values. Remove a key/value pair from the map. The key must exist. -
public fun remove<Key: store, Value: store>(map: &mut simple_map::SimpleMap<Key, Value>, key: &Key): (Key, Value)
+
public fun remove<Key: store, Value: store>(self: &mut simple_map::SimpleMap<Key, Value>, key: &Key): (Key, Value)
 
@@ -610,13 +611,13 @@ Remove a key/value pair from the map. The key must exist.
public fun remove<Key: store, Value: store>(
-    map: &mut SimpleMap<Key, Value>,
+    self: &mut SimpleMap<Key, Value>,
     key: &Key,
 ): (Key, Value) {
-    let maybe_idx = find(map, key);
-    assert!(option::is_some(&maybe_idx), error::invalid_argument(EKEY_NOT_FOUND));
-    let placement = option::extract(&mut maybe_idx);
-    let Element { key, value } = vector::swap_remove(&mut map.data, placement);
+    let maybe_idx = self.find(key);
+    assert!(maybe_idx.is_some(), error::invalid_argument(EKEY_NOT_FOUND));
+    let placement = maybe_idx.extract();
+    let Element { key, value } = self.data.swap_remove(placement);
     (key, value)
 }
 
@@ -631,7 +632,7 @@ Remove a key/value pair from the map. The key must exist. -
fun find<Key: store, Value: store>(map: &simple_map::SimpleMap<Key, Value>, key: &Key): option::Option<u64>
+
fun find<Key: store, Value: store>(self: &simple_map::SimpleMap<Key, Value>, key: &Key): option::Option<u64>
 
@@ -641,17 +642,15 @@ Remove a key/value pair from the map. The key must exist.
fun find<Key: store, Value: store>(
-    map: &SimpleMap<Key, Value>,
+    self: &SimpleMap<Key, Value>,
     key: &Key,
 ): option::Option<u64> {
-    let leng = vector::length(&map.data);
-    let i = 0;
-    while (i < leng) {
-        let element = vector::borrow(&map.data, i);
+    let len = self.data.length();
+    for (i in 0..len) {
+        let element = self.data.borrow(i);
         if (&element.key == key) {
             return option::some(i)
         };
-        i = i + 1;
     };
     option::none<u64>()
 }
@@ -710,7 +709,7 @@ Remove a key/value pair from the map. The key must exist.
 ### Function `length`
 
 
-
public fun length<Key: store, Value: store>(map: &simple_map::SimpleMap<Key, Value>): u64
+
public fun length<Key: store, Value: store>(self: &simple_map::SimpleMap<Key, Value>): u64
 
@@ -758,7 +757,7 @@ Remove a key/value pair from the map. The key must exist. ensures [abstract] spec_len(result) == len(keys); ensures [abstract] forall k: Key: spec_contains_key(result, k) <==> vector::spec_contains(keys, k); ensures [abstract] forall i in 0..len(keys): - spec_get(result, vector::borrow(keys, i)) == vector::borrow(values, i); + spec_get(result, keys.borrow(i)) == values.borrow(i);
@@ -785,7 +784,7 @@ Remove a key/value pair from the map. The key must exist. ### Function `borrow` -
public fun borrow<Key: store, Value: store>(map: &simple_map::SimpleMap<Key, Value>, key: &Key): &Value
+
public fun borrow<Key: store, Value: store>(self: &simple_map::SimpleMap<Key, Value>, key: &Key): &Value
 
@@ -801,7 +800,7 @@ Remove a key/value pair from the map. The key must exist. ### Function `borrow_mut` -
public fun borrow_mut<Key: store, Value: store>(map: &mut simple_map::SimpleMap<Key, Value>, key: &Key): &mut Value
+
public fun borrow_mut<Key: store, Value: store>(self: &mut simple_map::SimpleMap<Key, Value>, key: &Key): &mut Value
 
@@ -817,7 +816,7 @@ Remove a key/value pair from the map. The key must exist. ### Function `contains_key` -
public fun contains_key<Key: store, Value: store>(map: &simple_map::SimpleMap<Key, Value>, key: &Key): bool
+
public fun contains_key<Key: store, Value: store>(self: &simple_map::SimpleMap<Key, Value>, key: &Key): bool
 
@@ -833,7 +832,7 @@ Remove a key/value pair from the map. The key must exist. ### Function `destroy_empty` -
public fun destroy_empty<Key: store, Value: store>(map: simple_map::SimpleMap<Key, Value>)
+
public fun destroy_empty<Key: store, Value: store>(self: simple_map::SimpleMap<Key, Value>)
 
@@ -849,7 +848,7 @@ Remove a key/value pair from the map. The key must exist. ### Function `add` -
public fun add<Key: store, Value: store>(map: &mut simple_map::SimpleMap<Key, Value>, key: Key, value: Value)
+
public fun add<Key: store, Value: store>(self: &mut simple_map::SimpleMap<Key, Value>, key: Key, value: Value)
 
@@ -865,7 +864,7 @@ Remove a key/value pair from the map. The key must exist. ### Function `add_all` -
public fun add_all<Key: store, Value: store>(map: &mut simple_map::SimpleMap<Key, Value>, keys: vector<Key>, values: vector<Value>)
+
public fun add_all<Key: store, Value: store>(self: &mut simple_map::SimpleMap<Key, Value>, keys: vector<Key>, values: vector<Value>)
 
@@ -881,7 +880,7 @@ Remove a key/value pair from the map. The key must exist. ### Function `upsert` -
public fun upsert<Key: store, Value: store>(map: &mut simple_map::SimpleMap<Key, Value>, key: Key, value: Value): (option::Option<Key>, option::Option<Value>)
+
public fun upsert<Key: store, Value: store>(self: &mut simple_map::SimpleMap<Key, Value>, key: Key, value: Value): (option::Option<Key>, option::Option<Value>)
 
@@ -890,12 +889,14 @@ Remove a key/value pair from the map. The key must exist.
pragma intrinsic;
 pragma opaque;
 aborts_if [abstract] false;
-ensures [abstract] !spec_contains_key(old(map), key) ==> option::is_none(result_1);
-ensures [abstract] !spec_contains_key(old(map), key) ==> option::is_none(result_2);
-ensures [abstract] spec_contains_key(map, key);
-ensures [abstract] spec_get(map, key) == value;
-ensures [abstract] spec_contains_key(old(map), key) ==> ((option::is_some(result_1)) && (option::spec_borrow(result_1) == key));
-ensures [abstract] spec_contains_key(old(map), key) ==> ((option::is_some(result_2)) && (option::spec_borrow(result_2) == spec_get(old(map), key)));
+ensures [abstract] !spec_contains_key(old(self), key) ==> option::is_none(result_1);
+ensures [abstract] !spec_contains_key(old(self), key) ==> option::is_none(result_2);
+ensures [abstract] spec_contains_key(self, key);
+ensures [abstract] spec_get(self, key) == value;
+ensures [abstract] spec_contains_key(old(self), key) ==> ((option::is_some(result_1)) && (option::spec_borrow(result_1) == key));
+ensures [abstract] spec_contains_key(old(self), key) ==> ((option::is_some(result_2)) && (option::spec_borrow(result_2) == spec_get(old(
+    self
+), key)));
 
@@ -950,7 +951,7 @@ Remove a key/value pair from the map. The key must exist. ### Function `keys` -
public fun keys<Key: copy, Value>(map: &simple_map::SimpleMap<Key, Value>): vector<Key>
+
public fun keys<Key: copy, Value>(self: &simple_map::SimpleMap<Key, Value>): vector<Key>
 
@@ -966,7 +967,7 @@ Remove a key/value pair from the map. The key must exist. ### Function `values` -
public fun values<Key, Value: copy>(map: &simple_map::SimpleMap<Key, Value>): vector<Value>
+
public fun values<Key, Value: copy>(self: &simple_map::SimpleMap<Key, Value>): vector<Value>
 
@@ -982,7 +983,7 @@ Remove a key/value pair from the map. The key must exist. ### Function `to_vec_pair` -
public fun to_vec_pair<Key: store, Value: store>(map: simple_map::SimpleMap<Key, Value>): (vector<Key>, vector<Value>)
+
public fun to_vec_pair<Key: store, Value: store>(self: simple_map::SimpleMap<Key, Value>): (vector<Key>, vector<Value>)
 
@@ -992,9 +993,9 @@ Remove a key/value pair from the map. The key must exist. pragma opaque; ensures [abstract] forall k: Key: vector::spec_contains(result_1, k) <==> - spec_contains_key(map, k); + spec_contains_key(self, k); ensures [abstract] forall i in 0..len(result_1): - spec_get(map, vector::borrow(result_1, i)) == vector::borrow(result_2, i); + spec_get(self, result_1.borrow(i)) == result_2.borrow(i);
@@ -1004,7 +1005,7 @@ Remove a key/value pair from the map. The key must exist. ### Function `remove` -
public fun remove<Key: store, Value: store>(map: &mut simple_map::SimpleMap<Key, Value>, key: &Key): (Key, Value)
+
public fun remove<Key: store, Value: store>(self: &mut simple_map::SimpleMap<Key, Value>, key: &Key): (Key, Value)
 
@@ -1020,7 +1021,7 @@ Remove a key/value pair from the map. The key must exist. ### Function `find` -
fun find<Key: store, Value: store>(map: &simple_map::SimpleMap<Key, Value>, key: &Key): option::Option<u64>
+
fun find<Key: store, Value: store>(self: &simple_map::SimpleMap<Key, Value>, key: &Key): option::Option<u64>
 
diff --git a/aptos-move/framework/aptos-stdlib/doc/single_key.md b/aptos-move/framework/aptos-stdlib/doc/single_key.md
new file mode 100644
index 0000000000000..0c1a6e029a1c4
--- /dev/null
+++ b/aptos-move/framework/aptos-stdlib/doc/single_key.md
@@ -0,0 +1,410 @@
+
+
+
+# Module `0x1::single_key`
+
+This module implements Single Key representations of public keys.
+It is used to represent public keys for the Ed25519, SECP256K1, WebAuthn, and Keyless schemes in a unified way.
+
+
+-  [Enum `AnyPublicKey`](#0x1_single_key_AnyPublicKey)
+-  [Constants](#@Constants_0)
+-  [Function `new_public_key_from_bytes`](#0x1_single_key_new_public_key_from_bytes)
+-  [Function `deserialize_any_public_key`](#0x1_single_key_deserialize_any_public_key)
+-  [Function `is_keyless_or_federated_keyless_public_key`](#0x1_single_key_is_keyless_or_federated_keyless_public_key)
+-  [Function `from_ed25519_public_key_unvalidated`](#0x1_single_key_from_ed25519_public_key_unvalidated)
+-  [Function `to_authentication_key`](#0x1_single_key_to_authentication_key)
+-  [Specification](#@Specification_1)
+
+
use 0x1::bcs;
+use 0x1::bcs_stream;
+use 0x1::ed25519;
+use 0x1::error;
+use 0x1::federated_keyless;
+use 0x1::hash;
+use 0x1::keyless;
+use 0x1::secp256k1;
+use 0x1::secp256r1;
+
+ + + + + +## Enum `AnyPublicKey` + + + +
enum AnyPublicKey has copy, drop, store
+
+ + + +
+Variants + + +
+Ed25519 + + +
+Fields + + +
+
+pk: ed25519::UnvalidatedPublicKey +
+
+ +
+
+ + +
+ +
+ +
+Secp256k1Ecdsa + + +
+Fields + + +
+
+pk: secp256k1::ECDSARawPublicKey +
+
+ +
+
+ + +
+ +
+ +
+Secp256r1Ecdsa + + +
+Fields + + +
+
+pk: secp256r1::ECDSARawPublicKey +
+
+ +
+
+ + +
+ +
+ +
+Keyless + + +
+Fields + + +
+
+pk: keyless::PublicKey +
+
+ +
+
+ + +
+ +
+ +
+FederatedKeyless + + +
+Fields + + +
+
+pk: federated_keyless::PublicKey +
+
+ +
+
+ + +
+ +
+ +
+
+
+## Constants
+
+
+The identifier of the Single Key signature scheme, which is used when deriving Aptos authentication keys by hashing
+it together with a Single Key public key.
+
+
const SIGNATURE_SCHEME_ID: u8 = 2;
+
+ + + + + +Scheme identifier for Ed25519 single keys. + + +
const ED25519_PUBLIC_KEY_TYPE: u8 = 0;
+
+ + + + + +Unrecognized public key type. + + +
const E_INVALID_PUBLIC_KEY_TYPE: u64 = 1;
+
+ + + + + +There are extra bytes in the input when deserializing a Single Key public key. + + +
const E_INVALID_SINGLE_KEY_EXTRA_BYTES: u64 = 2;
+
+ + + + + +Scheme identifier for Federated Keyless single keys. + + +
const FEDERATED_KEYLESS_PUBLIC_KEY_TYPE: u8 = 4;
+
+ + + + + +Scheme identifier for Keyless single keys. + + +
const KEYLESS_PUBLIC_KEY_TYPE: u8 = 3;
+
+ + + + + +Scheme identifier for SECP256K1 single keys. + + +
const SECP256K1_PUBLIC_KEY_TYPE: u8 = 1;
+
+ + + + + +Scheme identifier for WebAuthn single keys. + + +
const WEB_AUTHN_PUBLIC_KEY_TYPE: u8 = 2;
+
+
+
+## Function `new_public_key_from_bytes`
+
+Parses the input bytes as an AnyPublicKey. The public key bytes are not guaranteed to be a valid
+representation of a point on its corresponding curve, if applicable.
+It does check that the bytes deserialize into a well-formed public key for the given scheme.
+
+
public fun new_public_key_from_bytes(bytes: vector<u8>): single_key::AnyPublicKey
+
+ + + +
+Implementation + + +
public fun new_public_key_from_bytes(bytes: vector<u8>): AnyPublicKey {
+    let stream = bcs_stream::new(bytes);
+    let pk = deserialize_any_public_key(&mut stream);
+    assert!(!bcs_stream::has_remaining(&mut stream), error::invalid_argument(E_INVALID_SINGLE_KEY_EXTRA_BYTES));
+    pk
+}
+
+ + + +
+ + + +## Function `deserialize_any_public_key` + +Deserializes a Single Key public key from a BCS stream. + + +
public fun deserialize_any_public_key(stream: &mut bcs_stream::BCSStream): single_key::AnyPublicKey
+
+ + + +
+Implementation + + +
public fun deserialize_any_public_key(stream: &mut bcs_stream::BCSStream): AnyPublicKey {
+    let scheme_id = bcs_stream::deserialize_u8(stream);
+    let pk: AnyPublicKey;
+    if (scheme_id == ED25519_PUBLIC_KEY_TYPE) {
+        let public_key_bytes = bcs_stream::deserialize_vector(stream, |x| deserialize_u8(x));
+        pk = AnyPublicKey::Ed25519{pk: ed25519::new_unvalidated_public_key_from_bytes(public_key_bytes)}
+    } else if (scheme_id == SECP256K1_PUBLIC_KEY_TYPE) {
+        let public_key_bytes = bcs_stream::deserialize_vector(stream, |x| deserialize_u8(x));
+        pk = AnyPublicKey::Secp256k1Ecdsa{pk: secp256k1::ecdsa_raw_public_key_from_64_bytes(public_key_bytes)};
+    } else if (scheme_id == WEB_AUTHN_PUBLIC_KEY_TYPE) {
+        let public_key_bytes = bcs_stream::deserialize_vector(stream, |x| deserialize_u8(x));
+        pk = AnyPublicKey::Secp256r1Ecdsa{pk: secp256r1::ecdsa_raw_public_key_from_64_bytes(public_key_bytes)};
+    } else if (scheme_id == KEYLESS_PUBLIC_KEY_TYPE) {
+        pk = AnyPublicKey::Keyless{pk: keyless::deserialize_public_key(stream)};
+    } else if (scheme_id == FEDERATED_KEYLESS_PUBLIC_KEY_TYPE) {
+        pk = AnyPublicKey::FederatedKeyless{pk: federated_keyless::deserialize_public_key(stream)}
+    } else {
+        abort error::invalid_argument(E_INVALID_PUBLIC_KEY_TYPE);
+    };
+    pk
+}
+
+ + + +
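
Putting the two functions above together, the expected encoding is a one-byte scheme id followed by the BCS bytes of the inner key. A hypothetical round-trip sketch, assuming a placeholder all-zero Ed25519 key and an illustrative `0x42` test module:

```move
#[test_only]
module 0x42::single_key_example {
    use std::bcs;
    use std::vector;
    use aptos_std::ed25519;
    use aptos_std::single_key;

    // Builds the Single Key encoding by hand: scheme id 0 (ED25519_PUBLIC_KEY_TYPE)
    // followed by the BCS bytes of the inner key, then parses it back.
    #[test]
    fun parse_round_trip() {
        let ed_pk_bytes = x"0000000000000000000000000000000000000000000000000000000000000000";
        let encoded = vector[0u8];
        vector::append(&mut encoded, bcs::to_bytes(&ed_pk_bytes));
        let parsed = single_key::new_public_key_from_bytes(encoded);
        let expected = single_key::from_ed25519_public_key_unvalidated(
            ed25519::new_unvalidated_public_key_from_bytes(ed_pk_bytes)
        );
        assert!(parsed == expected, 0);
    }
}
```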
+ + + +## Function `is_keyless_or_federated_keyless_public_key` + +Returns true if the public key is a keyless or federated keyless public key. + + +
public fun is_keyless_or_federated_keyless_public_key(pk: &single_key::AnyPublicKey): bool
+
+ + + +
+Implementation + + +
public fun is_keyless_or_federated_keyless_public_key(pk: &AnyPublicKey): bool {
+    match (pk) {
+        AnyPublicKey::Keyless { .. } => true,
+        AnyPublicKey::FederatedKeyless { .. } => true,
+        _ => false
+    }
+}
+
+ + + +
+ + + +## Function `from_ed25519_public_key_unvalidated` + +Converts an unvalidated Ed25519 public key to an AnyPublicKey. + + +
public fun from_ed25519_public_key_unvalidated(pk: ed25519::UnvalidatedPublicKey): single_key::AnyPublicKey
+
+ + + +
+Implementation + + +
public fun from_ed25519_public_key_unvalidated(pk: ed25519::UnvalidatedPublicKey): AnyPublicKey {
+    AnyPublicKey::Ed25519 { pk }
+}
+
+ + + +
+ + + +## Function `to_authentication_key` + +Gets the authentication key for the AnyPublicKey. + + +
public fun to_authentication_key(self: &single_key::AnyPublicKey): vector<u8>
+
+ + + +
+Implementation + + +
public fun to_authentication_key(self: &AnyPublicKey): vector<u8> {
+    let pk_bytes = bcs::to_bytes(self);
+    pk_bytes.push_back(SIGNATURE_SCHEME_ID);
+    hash::sha3_256(pk_bytes)
+}
+
+ + + +
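
A sketch of the derivation implemented above (BCS-encode the key, append the Single Key scheme id 2, hash with SHA3-256), assuming the same placeholder key and an illustrative `0x42` address:

```move
#[test_only]
module 0x42::auth_key_example {
    use std::bcs;
    use std::hash;
    use std::vector;
    use aptos_std::ed25519;
    use aptos_std::single_key;

    // Re-derives the authentication key by hand: sha3_256(bcs(pk) || scheme id 2).
    #[test]
    fun matches_manual_derivation() {
        let pk = single_key::from_ed25519_public_key_unvalidated(
            ed25519::new_unvalidated_public_key_from_bytes(
                x"0000000000000000000000000000000000000000000000000000000000000000"
            )
        );
        let manual = bcs::to_bytes(&pk);
        vector::push_back(&mut manual, 2u8); // SIGNATURE_SCHEME_ID for Single Key
        assert!(single_key::to_authentication_key(&pk) == hash::sha3_256(manual), 0);
    }
}
```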
+ + + +## Specification + + + +
pragma verify = false;
+
+
+
+[move-book]: https://aptos.dev/move/book/SUMMARY
diff --git a/aptos-move/framework/aptos-stdlib/doc/slots_storage.md b/aptos-move/framework/aptos-stdlib/doc/slots_storage.md
new file mode 100644
index 0000000000000..b364e8cc05633
--- /dev/null
+++ b/aptos-move/framework/aptos-stdlib/doc/slots_storage.md
@@ -0,0 +1,463 @@
+
+
+
+# Module `0x1::slots_storage`
+
+
+
+-  [Enum `Link`](#0x1_slots_storage_Link)
+-  [Struct `SlotsStorage`](#0x1_slots_storage_SlotsStorage)
+-  [Struct `TransientSlot`](#0x1_slots_storage_TransientSlot)
+-  [Constants](#@Constants_0)
+-  [Function `new_storage_slots`](#0x1_slots_storage_new_storage_slots)
+-  [Function `add`](#0x1_slots_storage_add)
+-  [Function `remove`](#0x1_slots_storage_remove)
+-  [Function `destroy_empty`](#0x1_slots_storage_destroy_empty)
+-  [Function `borrow`](#0x1_slots_storage_borrow)
+-  [Function `borrow_mut`](#0x1_slots_storage_borrow_mut)
+-  [Function `get_index`](#0x1_slots_storage_get_index)
+-  [Function `create_transient_slot`](#0x1_slots_storage_create_transient_slot)
+-  [Function `add_transient_slot`](#0x1_slots_storage_add_transient_slot)
+-  [Function `transiently_remove`](#0x1_slots_storage_transiently_remove)
+-  [Function `destroy_transient_slot`](#0x1_slots_storage_destroy_transient_slot)
+
+
use 0x1::table;
+
+ + + + + +## Enum `Link` + + + +
enum Link<T: store> has store
+
+ + + +
+Variants + + +
+Some + + +
+Fields + + +
+
+value: T +
+
+ +
+
+ + +
+ +
+ +
+Empty + + +
+Fields + + +
+
+next: u64 +
+
+ +
+
+ + +
+ +
+ +
+ + + +## Struct `SlotsStorage` + + + +
struct SlotsStorage<T: store> has store
+
+ + + +
+Fields + + +
+
+slots: table::Table<u64, slots_storage::Link<T>> +
+
+ +
+
+new_slot_index: u64 +
+
+ +
+
+never_deallocate: bool +
+
+ +
+
+reuse_head_index: u64 +
+
+ +
+
+ + +
+ + + +## Struct `TransientSlot` + + + +
struct TransientSlot
+
+ + + +
+Fields + + +
+
+slot_index: u64 +
+
+ +
+
+ + +
+ + + +## Constants + + + + + + +
const NULL_INDEX: u64 = 0;
+
+ + + + + +## Function `new_storage_slots` + + + +
public fun new_storage_slots<T: store>(): slots_storage::SlotsStorage<T>
+
+ + + +
+Implementation + + +
public fun new_storage_slots<T: store>(): SlotsStorage<T> {
+    SlotsStorage {
+        slots: table::new(),
+        new_slot_index: 1,
+        never_deallocate: false,
+        reuse_head_index: NULL_INDEX,
+    }
+}
+
+ + + +
+ + + +## Function `add` + + + +
public fun add<T: store>(self: &mut slots_storage::SlotsStorage<T>, val: T): u64
+
+ + + +
+Implementation + + +
public fun add<T: store>(self: &mut SlotsStorage<T>, val: T): u64 {
+    let slot_index = self.new_slot_index;
+    self.new_slot_index = self.new_slot_index + 1;
+    self.slots.add(slot_index, Link::Some { value: val });
+    slot_index
+}
+
+ + + +
+ + + +## Function `remove` + + + +
public fun remove<T: store>(self: &mut slots_storage::SlotsStorage<T>, slot_index: u64): T
+
+ + + +
+Implementation + + +
public fun remove<T: store>(self: &mut SlotsStorage<T>, slot_index: u64): T {
+    let Link::Some { value } = self.slots.remove(slot_index);
+    value
+}
+
+ + + +
+ + + +## Function `destroy_empty` + + + +
public fun destroy_empty<T: store>(self: slots_storage::SlotsStorage<T>)
+
+ + + +
+Implementation + + +
public fun destroy_empty<T: store>(self: SlotsStorage<T>) {
+    let SlotsStorage {
+        slots,
+        new_slot_index: _,
+        never_deallocate: _,
+        reuse_head_index: _,
+    } = self;
+    slots.destroy_empty();
+}
+
+ + + +
+ + + +## Function `borrow` + + + +
public fun borrow<T: store>(self: &slots_storage::SlotsStorage<T>, slot_index: u64): &T
+
+ + + +
+Implementation + + +
public fun borrow<T: store>(self: &SlotsStorage<T>, slot_index: u64): &T {
+    &self.slots.borrow(slot_index).value
+}
+
+ + + +
+ + + +## Function `borrow_mut` + + + +
public fun borrow_mut<T: store>(self: &mut slots_storage::SlotsStorage<T>, slot_index: u64): &mut T
+
+ + + +
+Implementation + + +
public fun borrow_mut<T: store>(self: &mut SlotsStorage<T>, slot_index: u64): &mut T {
+    &mut self.slots.borrow_mut(slot_index).value
+}
+
+ + + +
+ + + +## Function `get_index` + + + +
public fun get_index(self: &slots_storage::TransientSlot): u64
+
+ + + +
+Implementation + + +
public fun get_index(self: &TransientSlot): u64 {
+    self.slot_index
+}
+
+ + + +
+ + + +## Function `create_transient_slot` + + + +
public fun create_transient_slot<T: store>(self: &mut slots_storage::SlotsStorage<T>): slots_storage::TransientSlot
+
+ + + +
+Implementation + + +
public fun create_transient_slot<T: store>(self: &mut SlotsStorage<T>): TransientSlot {
+    let slot_index = self.new_slot_index;
+    self.new_slot_index = self.new_slot_index + 1;
+    TransientSlot {
+        slot_index,
+    }
+}
+
+ + + +
+ + + +## Function `add_transient_slot` + + + +
public fun add_transient_slot<T: store>(self: &mut slots_storage::SlotsStorage<T>, slot: slots_storage::TransientSlot, val: T)
+
+ + + +
+Implementation + + +
public fun add_transient_slot<T: store>(self: &mut SlotsStorage<T>, slot: TransientSlot, val: T) {
+    let TransientSlot { slot_index } = slot;
+    self.slots.add(slot_index, Link::Some { value: val });
+}
+
+ + + +
+ + + +## Function `transiently_remove` + + + +
public fun transiently_remove<T: store>(self: &mut slots_storage::SlotsStorage<T>, slot_index: u64): (slots_storage::TransientSlot, T)
+
+ + + +
+Implementation + + +
public fun transiently_remove<T: store>(self: &mut SlotsStorage<T>, slot_index: u64): (TransientSlot, T) {
+    let Link::Some { value } = self.slots.remove(slot_index);
+    (TransientSlot { slot_index }, value)
+}
+
+ + + +
+ + + +## Function `destroy_transient_slot` + + + +
public fun destroy_transient_slot(self: slots_storage::TransientSlot)
+
+ + + +
+Implementation + + +
public fun destroy_transient_slot(self: TransientSlot) {
+    let TransientSlot { slot_index: _ } = self;
+}
+
+ + + +
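
A minimal usage sketch of the slot lifecycle defined above (allocate, read back, free, destroy), assuming plain u64 values and an illustrative `0x42` test module:

```move
#[test_only]
module 0x42::slots_storage_example {
    use aptos_std::slots_storage;

    // Allocates a slot, reads it back, frees it, and destroys the empty storage.
    #[test]
    fun add_then_remove() {
        let storage = slots_storage::new_storage_slots<u64>();
        let idx = storage.add(42);
        assert!(*storage.borrow(idx) == 42, 0);
        assert!(storage.remove(idx) == 42, 1);
        storage.destroy_empty();
    }
}
```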
+
+
+[move-book]: https://aptos.dev/move/book/SUMMARY
diff --git a/aptos-move/framework/aptos-stdlib/doc/smart_table.md b/aptos-move/framework/aptos-stdlib/doc/smart_table.md
index d69931fabb804..0de67986393a9 100644
--- a/aptos-move/framework/aptos-stdlib/doc/smart_table.md
+++ b/aptos-move/framework/aptos-stdlib/doc/smart_table.md
@@ -10,6 +10,9 @@ when expanding to avoid unexpected gas cost.
 SmartTable uses faster hash function SipHash instead of cryptographically secure hash functions like sha3-256
 since it tolerates collisions.
 
+DEPRECATED: since its implementation is inefficient, it
+has been deprecated in favor of big_ordered_map.move.
+
 
 - [Struct `Entry`](#0x1_smart_table_Entry)
 - [Struct `SmartTable`](#0x1_smart_table_SmartTable)
@@ -326,7 +329,7 @@ dynamically assgined by the contract code.
 ): SmartTable<K, V> {
     assert!(split_load_threshold <= 100, error::invalid_argument(EINVALID_LOAD_THRESHOLD_PERCENT));
     let buckets = table_with_length::new();
-    table_with_length::add(&mut buckets, 0, vector::empty());
+    buckets.add(0, vector::empty());
     let table = SmartTable {
         buckets,
         num_buckets: 1,
@@ -341,8 +344,8 @@ dynamically assgined by the contract code.
         num_initial_buckets = 2;
     };
     while (num_initial_buckets > 1) {
-        num_initial_buckets = num_initial_buckets - 1;
-        split_one_bucket(&mut table);
+        num_initial_buckets -= 1;
+        table.split_one_bucket();
     };
     table
 }
@@ -360,7 +363,7 @@ Destroy empty table. Aborts if it's not empty.
 
 
-
public fun destroy_empty<K, V>(table: smart_table::SmartTable<K, V>)
+
public fun destroy_empty<K, V>(self: smart_table::SmartTable<K, V>)
 
@@ -369,15 +372,13 @@ Aborts if it's not empty. Implementation -
public fun destroy_empty<K, V>(table: SmartTable<K, V>) {
-    assert!(table.size == 0, error::invalid_argument(ENOT_EMPTY));
-    let i = 0;
-    while (i < table.num_buckets) {
-        vector::destroy_empty(table_with_length::remove(&mut table.buckets, i));
-        i = i + 1;
+
public fun destroy_empty<K, V>(self: SmartTable<K, V>) {
+    assert!(self.size == 0, error::invalid_argument(ENOT_EMPTY));
+    for (i in 0..self.num_buckets) {
+        self.buckets.remove(i).destroy_empty();
     };
-    let SmartTable { buckets, num_buckets: _, level: _, size: _, split_load_threshold: _, target_bucket_size: _ } = table;
-    table_with_length::destroy_empty(buckets);
+    let SmartTable { buckets, num_buckets: _, level: _, size: _, split_load_threshold: _, target_bucket_size: _ } = self;
+    buckets.destroy_empty();
 }
 
@@ -392,7 +393,7 @@ Aborts if it's not empty. Destroy a table completely when V has drop. -
public fun destroy<K: drop, V: drop>(table: smart_table::SmartTable<K, V>)
+
public fun destroy<K: drop, V: drop>(self: smart_table::SmartTable<K, V>)
 
@@ -401,9 +402,9 @@ Destroy a table completely when V has drop. Implementation -
public fun destroy<K: drop, V: drop>(table: SmartTable<K, V>) {
-    clear(&mut table);
-    destroy_empty(table);
+
public fun destroy<K: drop, V: drop>(self: SmartTable<K, V>) {
+    self.clear();
+    self.destroy_empty();
 }
 
@@ -418,7 +419,7 @@ Destroy a table completely when V has drop. Clear a table completely when T has drop. -
public fun clear<K: drop, V: drop>(table: &mut smart_table::SmartTable<K, V>)
+
public fun clear<K: drop, V: drop>(self: &mut smart_table::SmartTable<K, V>)
 
@@ -427,16 +428,14 @@ Clear a table completely when T has drop. Implementation -
public fun clear<K: drop, V: drop>(table: &mut SmartTable<K, V>) {
-    *table_with_length::borrow_mut(&mut table.buckets, 0) = vector::empty();
-    let i = 1;
-    while (i < table.num_buckets) {
-        table_with_length::remove(&mut table.buckets, i);
-        i = i + 1;
+
public fun clear<K: drop, V: drop>(self: &mut SmartTable<K, V>) {
+    *self.buckets.borrow_mut(0) = vector::empty();
+    for (i in 1..self.num_buckets) {
+        self.buckets.remove(i);
     };
-    table.num_buckets = 1;
-    table.level = 0;
-    table.size = 0;
+    self.num_buckets = 1;
+    self.level = 0;
+    self.size = 0;
 }
 
@@ -455,7 +454,7 @@ Abort if key already exists. Note: This method may occasionally cost much more gas when triggering bucket split. -
public fun add<K, V>(table: &mut smart_table::SmartTable<K, V>, key: K, value: V)
+
public fun add<K, V>(self: &mut smart_table::SmartTable<K, V>, key: K, value: V)
 
@@ -464,26 +463,26 @@ Note: This method may occasionally cost much more gas when triggering bucket spl Implementation -
public fun add<K, V>(table: &mut SmartTable<K, V>, key: K, value: V) {
+
public fun add<K, V>(self: &mut SmartTable<K, V>, key: K, value: V) {
     let hash = sip_hash_from_value(&key);
-    let index = bucket_index(table.level, table.num_buckets, hash);
-    let bucket = table_with_length::borrow_mut(&mut table.buckets, index);
+    let index = bucket_index(self.level, self.num_buckets, hash);
+    let bucket = self.buckets.borrow_mut(index);
     // We set a per-bucket limit here with a upper bound (10000) that nobody should normally reach.
-    assert!(vector::length(bucket) <= 10000, error::permission_denied(EEXCEED_MAX_BUCKET_SIZE));
-    assert!(vector::all(bucket, | entry | {
+    assert!(bucket.length() <= 10000, error::permission_denied(EEXCEED_MAX_BUCKET_SIZE));
+    assert!(bucket.all(| entry | {
         let e: &Entry<K, V> = entry;
         &e.key != &key
     }), error::invalid_argument(EALREADY_EXIST));
     let e = Entry { hash, key, value };
-    if (table.target_bucket_size == 0) {
+    if (self.target_bucket_size == 0) {
         let estimated_entry_size = max(size_of_val(&e), 1);
-        table.target_bucket_size = max(1024 /* free_write_quota */ / estimated_entry_size, 1);
+        self.target_bucket_size = max(1024 /* free_write_quota */ / estimated_entry_size, 1);
     };
-    vector::push_back(bucket, e);
-    table.size = table.size + 1;
+    bucket.push_back(e);
+    self.size += 1;
 
-    if (load_factor(table) >= (table.split_load_threshold as u64)) {
-        split_one_bucket(table);
+    if (self.load_factor() >= (self.split_load_threshold as u64)) {
+        self.split_one_bucket();
     }
 }
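
A minimal sketch of the resulting receiver-style call flow for add/borrow/remove, assuming an illustrative `0x42` test module; as noted above, add() can occasionally split a bucket and cost extra gas:

```move
#[test_only]
module 0x42::smart_table_example {
    use aptos_std::smart_table;

    // Basic add/borrow/remove flow using the receiver-style calls this change introduces.
    #[test]
    fun add_borrow_remove() {
        let t = smart_table::new<u64, u64>();
        t.add(7, 70);
        assert!(*t.borrow(7) == 70, 0);
        assert!(t.remove(7) == 70, 1);
        t.destroy_empty();
    }
}
```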
 
@@ -499,7 +498,7 @@ Note: This method may occasionally cost much more gas when triggering bucket spl Add multiple key/value pairs to the smart table. The keys must not already exist. -
public fun add_all<K, V>(table: &mut smart_table::SmartTable<K, V>, keys: vector<K>, values: vector<V>)
+
public fun add_all<K, V>(self: &mut smart_table::SmartTable<K, V>, keys: vector<K>, values: vector<V>)
 
@@ -508,8 +507,8 @@ Add multiple key/value pairs to the smart table. The keys must not already exist Implementation -
public fun add_all<K, V>(table: &mut SmartTable<K, V>, keys: vector<K>, values: vector<V>) {
-    vector::zip(keys, values, |key, value| { add(table, key, value); });
+
public fun add_all<K, V>(self: &mut SmartTable<K, V>, keys: vector<K>, values: vector<V>) {
+    keys.zip(values, |key, value| { self.add(key, value); });
 }
 
@@ -535,10 +534,10 @@ Add multiple key/value pairs to the smart table. The keys must not already exist
inline fun unzip_entries<K: copy, V: copy>(entries: &vector<Entry<K, V>>): (vector<K>, vector<V>) {
     let keys = vector[];
     let values = vector[];
-    vector::for_each_ref(entries, |e|{
+    entries.for_each_ref(|e|{
         let entry: &Entry<K, V> = e;
-        vector::push_back(&mut keys, entry.key);
-        vector::push_back(&mut values, entry.value);
+        keys.push_back(entry.key);
+        values.push_back(entry.value);
     });
     (keys, values)
 }
@@ -557,7 +556,7 @@ view of the whole table.
 Disclaimer: This function may be costly as the smart table may be huge in size. Use it at your own discretion.
 
 
-
public fun to_simple_map<K: copy, drop, store, V: copy, store>(table: &smart_table::SmartTable<K, V>): simple_map::SimpleMap<K, V>
+
public fun to_simple_map<K: copy, drop, store, V: copy, store>(self: &smart_table::SmartTable<K, V>): simple_map::SimpleMap<K, V>
 
@@ -567,14 +566,12 @@ Disclaimer: This function may be costly as the smart table may be huge in size.
public fun to_simple_map<K: store + copy + drop, V: store + copy>(
-    table: &SmartTable<K, V>,
+    self: &SmartTable<K, V>,
 ): SimpleMap<K, V> {
-    let i = 0;
     let res = simple_map::new<K, V>();
-    while (i < table.num_buckets) {
-        let (keys, values) = unzip_entries(table_with_length::borrow(&table.buckets, i));
-        simple_map::add_all(&mut res, keys, values);
-        i = i + 1;
+    for (i in 0..self.num_buckets) {
+        let (keys, values) = unzip_entries(self.buckets.borrow(i));
+        res.add_all(keys, values);
     };
     res
 }
@@ -594,7 +591,7 @@ For a large enough smart table this function will fail due to execution gas limi
 keys_paginated should be used instead.
 
 
-
public fun keys<K: copy, drop, store, V: copy, store>(table_ref: &smart_table::SmartTable<K, V>): vector<K>
+
public fun keys<K: copy, drop, store, V: copy, store>(self: &smart_table::SmartTable<K, V>): vector<K>
 
@@ -604,9 +601,9 @@ For a large enough smart table this function will fail due to execution gas limi
public fun keys<K: store + copy + drop, V: store + copy>(
-    table_ref: &SmartTable<K, V>
+    self: &SmartTable<K, V>
 ): vector<K> {
-    let (keys, _, _) = keys_paginated(table_ref, 0, 0, length(table_ref));
+    let (keys, _, _) = self.keys_paginated(0, 0, self.length());
     keys
 }
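
The paginated variant documented just below can be driven in a loop until both returned indices come back as none; a hypothetical sketch, assuming an illustrative `0x42` test module and a page size of 10:

```move
#[test_only]
module 0x42::smart_table_pagination_example {
    use std::option;
    use std::vector;
    use aptos_std::smart_table;

    // Collects every key in pages of 10, resuming from the bucket/vector indices
    // returned by keys_paginated until pagination is complete.
    #[test]
    fun paginate_all_keys() {
        let t = smart_table::new<u64, u64>();
        for (i in 0..25) {
            t.add(i, i);
        };
        let all_keys = vector::empty<u64>();
        let bucket_idx = option::some(0);
        let vec_idx = option::some(0);
        while (bucket_idx.is_some()) {
            let (page, next_bucket, next_vec) =
                t.keys_paginated(bucket_idx.extract(), vec_idx.extract(), 10);
            vector::append(&mut all_keys, page);
            bucket_idx = next_bucket;
            vec_idx = next_vec;
        };
        assert!(vector::length(&all_keys) == 25, 0);
        t.destroy();
    }
}
```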
 
@@ -634,7 +631,7 @@ returned bucket index and vector index value options are both none, which means pagination is complete. For an example, see test_keys(). -
public fun keys_paginated<K: copy, drop, store, V: copy, store>(table_ref: &smart_table::SmartTable<K, V>, starting_bucket_index: u64, starting_vector_index: u64, num_keys_to_get: u64): (vector<K>, option::Option<u64>, option::Option<u64>)
+
public fun keys_paginated<K: copy, drop, store, V: copy, store>(self: &smart_table::SmartTable<K, V>, starting_bucket_index: u64, starting_vector_index: u64, num_keys_to_get: u64): (vector<K>, option::Option<u64>, option::Option<u64>)
 
@@ -644,7 +641,7 @@ pagination is complete. For an example, see test_keys().
public fun keys_paginated<K: store + copy + drop, V: store + copy>(
-    table_ref: &SmartTable<K, V>,
+    self: &SmartTable<K, V>,
     starting_bucket_index: u64,
     starting_vector_index: u64,
     num_keys_to_get: u64,
@@ -653,11 +650,11 @@ pagination is complete. For an example, see test_keys().
     Option<u64>,
     Option<u64>,
 ) {
-    let num_buckets = table_ref.num_buckets;
-    let buckets_ref = &table_ref.buckets;
+    let num_buckets = self.num_buckets;
+    let buckets_ref = &self.buckets;
     assert!(starting_bucket_index < num_buckets, EINVALID_BUCKET_INDEX);
-    let bucket_ref = table_with_length::borrow(buckets_ref, starting_bucket_index);
-    let bucket_length = vector::length(bucket_ref);
+    let bucket_ref = buckets_ref.borrow(starting_bucket_index);
+    let bucket_length = bucket_ref.length();
     assert!(
         // In the general case, starting vector index should never be equal to bucket length
         // because then iteration will attempt to borrow a vector element that is out of bounds.
@@ -671,15 +668,15 @@ pagination is complete. For an example, see test_keys().
     if (num_keys_to_get == 0) return
         (keys, option::some(starting_bucket_index), option::some(starting_vector_index));
     for (bucket_index in starting_bucket_index..num_buckets) {
-        bucket_ref = table_with_length::borrow(buckets_ref, bucket_index);
-        bucket_length = vector::length(bucket_ref);
+        bucket_ref = buckets_ref.borrow(bucket_index);
+        bucket_length = bucket_ref.length();
         for (vector_index in starting_vector_index..bucket_length) {
-            vector::push_back(&mut keys, vector::borrow(bucket_ref, vector_index).key);
-            num_keys_to_get = num_keys_to_get - 1;
+            keys.push_back(bucket_ref.borrow(vector_index).key);
+            num_keys_to_get -= 1;
             if (num_keys_to_get == 0) {
-                vector_index = vector_index + 1;
+                vector_index += 1;
                 return if (vector_index == bucket_length) {
-                    bucket_index = bucket_index + 1;
+                    bucket_index += 1;
                     if (bucket_index < num_buckets) {
                         (keys, option::some(bucket_index), option::some(0))
                     } else {
@@ -707,7 +704,7 @@ pagination is complete. For an example, see test_keys().
 Decide which is the next bucket to split and split it into two with the elements inside the bucket.
 
 
-
fun split_one_bucket<K, V>(table: &mut smart_table::SmartTable<K, V>)
+
fun split_one_bucket<K, V>(self: &mut smart_table::SmartTable<K, V>)
 
@@ -716,23 +713,23 @@ Decide which is the next bucket to split and split it into two with the elements Implementation -
fun split_one_bucket<K, V>(table: &mut SmartTable<K, V>) {
-    let new_bucket_index = table.num_buckets;
+
fun split_one_bucket<K, V>(self: &mut SmartTable<K, V>) {
+    let new_bucket_index = self.num_buckets;
     // the next bucket to split is num_bucket without the most significant bit.
-    let to_split = new_bucket_index ^ (1 << table.level);
-    table.num_buckets = new_bucket_index + 1;
+    let to_split = new_bucket_index ^ (1 << self.level);
+    self.num_buckets = new_bucket_index + 1;
     // if the whole level is splitted once, bump the level.
-    if (to_split + 1 == 1 << table.level) {
-        table.level = table.level + 1;
+    if (to_split + 1 == 1 << self.level) {
+        self.level += 1;
     };
-    let old_bucket = table_with_length::borrow_mut(&mut table.buckets, to_split);
+    let old_bucket = self.buckets.borrow_mut(to_split);
     // partition the bucket, [0..p) stays in old bucket, [p..len) goes to new bucket
-    let p = vector::partition(old_bucket, |e| {
+    let p = old_bucket.partition(|e| {
         let entry: &Entry<K, V> = e; // Explicit type to satisfy compiler
-        bucket_index(table.level, table.num_buckets, entry.hash) != new_bucket_index
+        bucket_index(self.level, self.num_buckets, entry.hash) != new_bucket_index
     });
-    let new_bucket = vector::trim_reverse(old_bucket, p);
-    table_with_length::add(&mut table.buckets, new_bucket_index, new_bucket);
+    let new_bucket = old_bucket.trim_reverse(p);
+    self.buckets.add(new_bucket_index, new_bucket);
 }
 
@@ -782,7 +779,7 @@ Acquire an immutable reference to the value which key maps to. Aborts if there is no entry for key. -
public fun borrow<K: drop, V>(table: &smart_table::SmartTable<K, V>, key: K): &V
+
public fun borrow<K: drop, V>(self: &smart_table::SmartTable<K, V>, key: K): &V
 
@@ -791,17 +788,15 @@ Aborts if there is no entry for key. Implementation -
public fun borrow<K: drop, V>(table: &SmartTable<K, V>, key: K): &V {
-    let index = bucket_index(table.level, table.num_buckets, sip_hash_from_value(&key));
-    let bucket = table_with_length::borrow(&table.buckets, index);
-    let i = 0;
-    let len = vector::length(bucket);
-    while (i < len) {
-        let entry = vector::borrow(bucket, i);
+
public fun borrow<K: drop, V>(self: &SmartTable<K, V>, key: K): &V {
+    let index = bucket_index(self.level, self.num_buckets, sip_hash_from_value(&key));
+    let bucket = self.buckets.borrow(index);
+    let len = bucket.length();
+    for (i in 0..len) {
+        let entry = bucket.borrow(i);
         if (&entry.key == &key) {
             return &entry.value
         };
-        i = i + 1;
     };
     abort error::invalid_argument(ENOT_FOUND)
 }
@@ -819,7 +814,7 @@ Acquire an immutable reference to the value which key maps to.
 Returns specified default value if there is no entry for key.
 
 
-
public fun borrow_with_default<K: copy, drop, V>(table: &smart_table::SmartTable<K, V>, key: K, default: &V): &V
+
public fun borrow_with_default<K: copy, drop, V>(self: &smart_table::SmartTable<K, V>, key: K, default: &V): &V
 
@@ -828,11 +823,11 @@ Returns specified default value if there is no entry for key. Implementation -
public fun borrow_with_default<K: copy + drop, V>(table: &SmartTable<K, V>, key: K, default: &V): &V {
-    if (!contains(table, copy key)) {
+
public fun borrow_with_default<K: copy + drop, V>(self: &SmartTable<K, V>, key: K, default: &V): &V {
+    if (!self.contains(copy key)) {
         default
     } else {
-        borrow(table, copy key)
+        self.borrow(copy key)
     }
 }
 
@@ -849,7 +844,7 @@ Acquire a mutable reference to the value which key maps to. Aborts if there is no entry for key. -
public fun borrow_mut<K: drop, V>(table: &mut smart_table::SmartTable<K, V>, key: K): &mut V
+
public fun borrow_mut<K: drop, V>(self: &mut smart_table::SmartTable<K, V>, key: K): &mut V
 
@@ -858,17 +853,15 @@ Aborts if there is no entry for key. Implementation -
public fun borrow_mut<K: drop, V>(table: &mut SmartTable<K, V>, key: K): &mut V {
-    let index = bucket_index(table.level, table.num_buckets, sip_hash_from_value(&key));
-    let bucket = table_with_length::borrow_mut(&mut table.buckets, index);
-    let i = 0;
-    let len = vector::length(bucket);
-    while (i < len) {
-        let entry = vector::borrow_mut(bucket, i);
+
public fun borrow_mut<K: drop, V>(self: &mut SmartTable<K, V>, key: K): &mut V {
+    let index = bucket_index(self.level, self.num_buckets, sip_hash_from_value(&key));
+    let bucket = self.buckets.borrow_mut(index);
+    let len = bucket.length();
+    for (i in 0..len) {
+        let entry = bucket.borrow_mut(i);
         if (&entry.key == &key) {
             return &mut entry.value
         };
-        i = i + 1;
     };
     abort error::invalid_argument(ENOT_FOUND)
 }
@@ -886,7 +879,7 @@ Acquire a mutable reference to the value which key maps to.
 Insert the pair (key, default) first if there is no entry for key.
 
 
-
public fun borrow_mut_with_default<K: copy, drop, V: drop>(table: &mut smart_table::SmartTable<K, V>, key: K, default: V): &mut V
+
public fun borrow_mut_with_default<K: copy, drop, V: drop>(self: &mut smart_table::SmartTable<K, V>, key: K, default: V): &mut V
 
@@ -896,14 +889,14 @@ Insert the pair (key, default) first if there is no en
public fun borrow_mut_with_default<K: copy + drop, V: drop>(
-    table: &mut SmartTable<K, V>,
+    self: &mut SmartTable<K, V>,
     key: K,
     default: V
 ): &mut V {
-    if (!contains(table, copy key)) {
-        add(table, copy key, default)
+    if (!self.contains(copy key)) {
+        self.add(copy key, default)
     };
-    borrow_mut(table, key)
+    self.borrow_mut(key)
 }
 
@@ -918,7 +911,7 @@ Insert the pair (key, default) first if there is no en Returns true iff table contains an entry for key. -
public fun contains<K: drop, V>(table: &smart_table::SmartTable<K, V>, key: K): bool
+
public fun contains<K: drop, V>(self: &smart_table::SmartTable<K, V>, key: K): bool
 
@@ -927,12 +920,11 @@ Returns true iff table contains an Implementation -
public fun contains<K: drop, V>(table: &SmartTable<K, V>, key: K): bool {
+
public fun contains<K: drop, V>(self: &SmartTable<K, V>, key: K): bool {
     let hash = sip_hash_from_value(&key);
-    let index = bucket_index(table.level, table.num_buckets, hash);
-    let bucket = table_with_length::borrow(&table.buckets, index);
-    vector::any(bucket, | entry | {
-        let e: &Entry<K, V> = entry;
+    let index = bucket_index(self.level, self.num_buckets, hash);
+    let bucket = self.buckets.borrow(index);
+    bucket.any(| e | {
         e.hash == hash && &e.key == &key
     })
 }
@@ -950,7 +942,7 @@ Remove from table and return the v
 Aborts if there is no entry for key.
 
 
-
public fun remove<K: copy, drop, V>(table: &mut smart_table::SmartTable<K, V>, key: K): V
+
public fun remove<K: copy, drop, V>(self: &mut smart_table::SmartTable<K, V>, key: K): V
 
@@ -959,19 +951,17 @@ Aborts if there is no entry for key. Implementation -
public fun remove<K: copy + drop, V>(table: &mut SmartTable<K, V>, key: K): V {
-    let index = bucket_index(table.level, table.num_buckets, sip_hash_from_value(&key));
-    let bucket = table_with_length::borrow_mut(&mut table.buckets, index);
-    let i = 0;
-    let len = vector::length(bucket);
-    while (i < len) {
-        let entry = vector::borrow(bucket, i);
+
public fun remove<K: copy + drop, V>(self: &mut SmartTable<K, V>, key: K): V {
+    let index = bucket_index(self.level, self.num_buckets, sip_hash_from_value(&key));
+    let bucket = self.buckets.borrow_mut(index);
+    let len = bucket.length();
+    for (i in 0..len) {
+        let entry = bucket.borrow(i);
         if (&entry.key == &key) {
-            let Entry { hash: _, key: _, value } = vector::swap_remove(bucket, i);
-            table.size = table.size - 1;
+            let Entry { hash: _, key: _, value } = bucket.swap_remove(i);
+            self.size -= 1;
             return value
         };
-        i = i + 1;
     };
     abort error::invalid_argument(ENOT_FOUND)
 }
@@ -989,7 +979,7 @@ Insert the pair (key, value) if there is no entry for
 update the value of the entry for key to value otherwise
 
 
-
public fun upsert<K: copy, drop, V: drop>(table: &mut smart_table::SmartTable<K, V>, key: K, value: V)
+
public fun upsert<K: copy, drop, V: drop>(self: &mut smart_table::SmartTable<K, V>, key: K, value: V)
 
@@ -998,11 +988,11 @@ update the value of the entry for key to value otherwi Implementation -
public fun upsert<K: copy + drop, V: drop>(table: &mut SmartTable<K, V>, key: K, value: V) {
-    if (!contains(table, copy key)) {
-        add(table, copy key, value)
+
public fun upsert<K: copy + drop, V: drop>(self: &mut SmartTable<K, V>, key: K, value: V) {
+    if (!self.contains(copy key)) {
+        self.add(copy key, value)
     } else {
-        let ref = borrow_mut(table, key);
+        let ref = self.borrow_mut(key);
         *ref = value;
     };
 }
@@ -1019,7 +1009,7 @@ update the value of the entry for key to value otherwi
 Returns the length of the table, i.e. the number of entries.
 
 
-
public fun length<K, V>(table: &smart_table::SmartTable<K, V>): u64
+
public fun length<K, V>(self: &smart_table::SmartTable<K, V>): u64
 
@@ -1028,8 +1018,8 @@ Returns the length of the table, i.e. the number of entries. Implementation -
public fun length<K, V>(table: &SmartTable<K, V>): u64 {
-    table.size
+
public fun length<K, V>(self: &SmartTable<K, V>): u64 {
+    self.size
 }
 
@@ -1044,7 +1034,7 @@ Returns the length of the table, i.e. the number of entries.

Return the load factor of the hashtable.


-
public fun load_factor<K, V>(table: &smart_table::SmartTable<K, V>): u64
+
public fun load_factor<K, V>(self: &smart_table::SmartTable<K, V>): u64
 
@@ -1053,8 +1043,8 @@ Return the load factor of the hashtable. Implementation -
public fun load_factor<K, V>(table: &SmartTable<K, V>): u64 {
-    table.size * 100 / table.num_buckets / table.target_bucket_size
+
public fun load_factor<K, V>(self: &SmartTable<K, V>): u64 {
+    self.size * 100 / self.num_buckets / self.target_bucket_size
 }
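
For intuition, the result is an integer percentage: a table holding, say, 150 entries across 4 buckets with a target bucket size of 50 reports 150 * 100 / 4 / 50 = 75, i.e. it is at 75% of its configured capacity and a bucket will be split once split_load_threshold is exceeded.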
 
@@ -1069,7 +1059,7 @@ Return the load factor of the hashtable.

Update split_load_threshold.


-
public fun update_split_load_threshold<K, V>(table: &mut smart_table::SmartTable<K, V>, split_load_threshold: u8)
+
public fun update_split_load_threshold<K, V>(self: &mut smart_table::SmartTable<K, V>, split_load_threshold: u8)
 
@@ -1078,12 +1068,12 @@ Update split_load_threshold. Implementation -
public fun update_split_load_threshold<K, V>(table: &mut SmartTable<K, V>, split_load_threshold: u8) {
+
public fun update_split_load_threshold<K, V>(self: &mut SmartTable<K, V>, split_load_threshold: u8) {
     assert!(
         split_load_threshold <= 100 && split_load_threshold > 0,
         error::invalid_argument(EINVALID_LOAD_THRESHOLD_PERCENT)
     );
-    table.split_load_threshold = split_load_threshold;
+    self.split_load_threshold = split_load_threshold;
 }
 
@@ -1098,7 +1088,7 @@ Update split_load_threshold.

Update target_bucket_size.


-
public fun update_target_bucket_size<K, V>(table: &mut smart_table::SmartTable<K, V>, target_bucket_size: u64)
+
public fun update_target_bucket_size<K, V>(self: &mut smart_table::SmartTable<K, V>, target_bucket_size: u64)
 
@@ -1107,9 +1097,9 @@ Update target_bucket_size. Implementation -
public fun update_target_bucket_size<K, V>(table: &mut SmartTable<K, V>, target_bucket_size: u64) {
+
public fun update_target_bucket_size<K, V>(self: &mut SmartTable<K, V>, target_bucket_size: u64) {
     assert!(target_bucket_size > 0, error::invalid_argument(EINVALID_TARGET_BUCKET_SIZE));
-    table.target_bucket_size = target_bucket_size;
+    self.target_bucket_size = target_bucket_size;
 }
 
@@ -1124,7 +1114,7 @@ Update target_bucket_size.

Apply the function to a reference of each key-value pair in the table.


-
public fun for_each_ref<K, V>(table: &smart_table::SmartTable<K, V>, f: |(&K, &V)|)
+
public fun for_each_ref<K, V>(self: &smart_table::SmartTable<K, V>, f: |(&K, &V)|)
 
@@ -1133,17 +1123,12 @@ Apply the function to a reference of each key-value pair in the table. Implementation -
public inline fun for_each_ref<K, V>(table: &SmartTable<K, V>, f: |&K, &V|) {
-    let i = 0;
-    while (i < aptos_std::smart_table::num_buckets(table)) {
-        vector::for_each_ref(
-            aptos_std::table_with_length::borrow(aptos_std::smart_table::borrow_buckets(table), i),
-            |elem| {
-                let (key, value) = aptos_std::smart_table::borrow_kv(elem);
-                f(key, value)
-            }
-        );
-        i = i + 1;
+
public inline fun for_each_ref<K, V>(self: &SmartTable<K, V>, f: |&K, &V|) {
+    for (i in 0..self.num_buckets()) {
+        self.borrow_buckets().borrow(i).for_each_ref(|elem| {
+            let (key, value) = elem.borrow_kv();
+            f(key, value)
+        });
     }
 }
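
Sketch of a read-only traversal with `for_each_ref` in the same hypothetical test module; the closure receives references to each key and value:

#[test]
fun for_each_ref_example() {
    let t = smart_table::new<u64, u64>();
    t.add(1, 10);
    t.add(2, 20);
    let sum = 0;
    t.for_each_ref(|_k, v| sum = sum + *v);   // visits every bucket in order
    assert!(sum == 30, 0);
    t.destroy();
}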
 
@@ -1159,7 +1144,7 @@ Apply the function to a reference of each key-value pair in the table.

Apply the function to a mutable reference of each key-value pair in the table.


-
public fun for_each_mut<K, V>(table: &mut smart_table::SmartTable<K, V>, f: |(&K, &mut V)|)
+
public fun for_each_mut<K, V>(self: &mut smart_table::SmartTable<K, V>, f: |(&K, &mut V)|)
 
@@ -1168,17 +1153,12 @@ Apply the function to a mutable reference of each key-value pair in the table. Implementation -
public inline fun for_each_mut<K, V>(table: &mut SmartTable<K, V>, f: |&K, &mut V|) {
-    let i = 0;
-    while (i < aptos_std::smart_table::num_buckets(table)) {
-        vector::for_each_mut(
-            table_with_length::borrow_mut(aptos_std::smart_table::borrow_buckets_mut(table), i),
-            |elem| {
-                let (key, value) = aptos_std::smart_table::borrow_kv_mut(elem);
-                f(key, value)
-            }
-        );
-        i = i + 1;
+
public inline fun for_each_mut<K, V>(self: &mut SmartTable<K, V>, f: |&K, &mut V|) {
+    for (i in 0..self.num_buckets()) {
+        self.borrow_buckets_mut().borrow_mut(i).for_each_mut(|elem| {
+            let (key, value) = elem.borrow_kv_mut();
+            f(key, value)
+        });
     };
 }
 
@@ -1194,7 +1174,7 @@ Apply the function to a mutable reference of each key-value pair in the table.

Map the function over the references of key-value pairs in the table without modifying it.


-
public fun map_ref<K: copy, drop, store, V1, V2: store>(table: &smart_table::SmartTable<K, V1>, f: |&V1|V2): smart_table::SmartTable<K, V2>
+
public fun map_ref<K: copy, drop, store, V1, V2: store>(self: &smart_table::SmartTable<K, V1>, f: |&V1|V2): smart_table::SmartTable<K, V2>
 
@@ -1204,11 +1184,11 @@ Map the function over the references of key-value pairs in the table without mod
public inline fun map_ref<K: copy + drop + store, V1, V2: store>(
-    table: &SmartTable<K, V1>,
+    self: &SmartTable<K, V1>,
     f: |&V1|V2
 ): SmartTable<K, V2> {
     let new_table = new<K, V2>();
-    for_each_ref(table, |key, value| add(&mut new_table, *key, f(value)));
+    self.for_each_ref(|key, value| new_table.add(*key, f(value)));
     new_table
 }
 
@@ -1224,7 +1204,7 @@ Map the function over the references of key-value pairs in the table without mod

Return true if any key-value pair in the table satisfies the predicate.


-
public fun any<K, V>(table: &smart_table::SmartTable<K, V>, p: |(&K, &V)|bool): bool
+
public fun any<K, V>(self: &smart_table::SmartTable<K, V>, p: |(&K, &V)|bool): bool
 
@@ -1234,18 +1214,16 @@ Return true if any key-value pair in the table satisfies the predicate.
public inline fun any<K, V>(
-    table: &SmartTable<K, V>,
+    self: &SmartTable<K, V>,
     p: |&K, &V|bool
 ): bool {
     let found = false;
-    let i = 0;
-    while (i < aptos_std::smart_table::num_buckets(table)) {
-        found = vector::any(table_with_length::borrow(aptos_std::smart_table::borrow_buckets(table), i), |elem| {
-            let (key, value) = aptos_std::smart_table::borrow_kv(elem);
+    for (i in 0..self.num_buckets()) {
+        found = self.borrow_buckets().borrow(i).any(|elem| {
+            let (key, value) = elem.borrow_kv();
             p(key, value)
         });
         if (found) break;
-        i = i + 1;
     };
     found
 }
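
Sketch of `any` under the same assumptions; scanning stops at the first bucket that yields a match:

#[test]
fun any_example() {
    let t = smart_table::new<u64, u64>();
    t.add(1, 5);
    t.add(2, 50);
    assert!(t.any(|_k, v| *v > 10), 0);
    assert!(!t.any(|_k, v| *v > 100), 1);
    t.destroy();
}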
@@ -1261,7 +1239,7 @@ Return true if any key-value pair in the table satisfies the predicate.
 
 
 
-
public fun borrow_kv<K, V>(e: &smart_table::Entry<K, V>): (&K, &V)
+
public fun borrow_kv<K, V>(self: &smart_table::Entry<K, V>): (&K, &V)
 
@@ -1270,8 +1248,8 @@ Return true if any key-value pair in the table satisfies the predicate. Implementation -
public fun borrow_kv<K, V>(e: &Entry<K, V>): (&K, &V) {
-    (&e.key, &e.value)
+
public fun borrow_kv<K, V>(self: &Entry<K, V>): (&K, &V) {
+    (&self.key, &self.value)
 }
 
@@ -1285,7 +1263,7 @@ Return true if any key-value pair in the table satisfies the predicate. -
public fun borrow_kv_mut<K, V>(e: &mut smart_table::Entry<K, V>): (&mut K, &mut V)
+
public fun borrow_kv_mut<K, V>(self: &mut smart_table::Entry<K, V>): (&mut K, &mut V)
 
@@ -1294,8 +1272,8 @@ Return true if any key-value pair in the table satisfies the predicate. Implementation -
public fun borrow_kv_mut<K, V>(e: &mut Entry<K, V>): (&mut K, &mut V) {
-    (&mut e.key, &mut e.value)
+
public fun borrow_kv_mut<K, V>(self: &mut Entry<K, V>): (&mut K, &mut V) {
+    (&mut self.key, &mut self.value)
 }
 
@@ -1309,7 +1287,7 @@ Return true if any key-value pair in the table satisfies the predicate. -
public fun num_buckets<K, V>(table: &smart_table::SmartTable<K, V>): u64
+
public fun num_buckets<K, V>(self: &smart_table::SmartTable<K, V>): u64
 
@@ -1318,8 +1296,8 @@ Return true if any key-value pair in the table satisfies the predicate. Implementation -
public fun num_buckets<K, V>(table: &SmartTable<K, V>): u64 {
-    table.num_buckets
+
public fun num_buckets<K, V>(self: &SmartTable<K, V>): u64 {
+    self.num_buckets
 }
 
@@ -1333,7 +1311,7 @@ Return true if any key-value pair in the table satisfies the predicate. -
public fun borrow_buckets<K, V>(table: &smart_table::SmartTable<K, V>): &table_with_length::TableWithLength<u64, vector<smart_table::Entry<K, V>>>
+
public fun borrow_buckets<K, V>(self: &smart_table::SmartTable<K, V>): &table_with_length::TableWithLength<u64, vector<smart_table::Entry<K, V>>>
 
@@ -1342,8 +1320,8 @@ Return true if any key-value pair in the table satisfies the predicate. Implementation -
public fun borrow_buckets<K, V>(table: &SmartTable<K, V>): &TableWithLength<u64, vector<Entry<K, V>>> {
-    &table.buckets
+
public fun borrow_buckets<K, V>(self: &SmartTable<K, V>): &TableWithLength<u64, vector<Entry<K, V>>> {
+    &self.buckets
 }
 
@@ -1357,7 +1335,7 @@ Return true if any key-value pair in the table satisfies the predicate. -
public fun borrow_buckets_mut<K, V>(table: &mut smart_table::SmartTable<K, V>): &mut table_with_length::TableWithLength<u64, vector<smart_table::Entry<K, V>>>
+
public fun borrow_buckets_mut<K, V>(self: &mut smart_table::SmartTable<K, V>): &mut table_with_length::TableWithLength<u64, vector<smart_table::Entry<K, V>>>
 
@@ -1366,8 +1344,8 @@ Return true if any key-value pair in the table satisfies the predicate. Implementation -
public fun borrow_buckets_mut<K, V>(table: &mut SmartTable<K, V>): &mut TableWithLength<u64, vector<Entry<K, V>>> {
-    &mut table.buckets
+
public fun borrow_buckets_mut<K, V>(self: &mut SmartTable<K, V>): &mut TableWithLength<u64, vector<Entry<K, V>>> {
+    &mut self.buckets
 }
 
@@ -1442,6 +1420,7 @@ Return true if any key-value pair in the table satisfies the predicate.
    map_borrow = borrow,
    map_borrow_mut = borrow_mut,
    map_borrow_mut_with_default = borrow_mut_with_default,
+    map_borrow_with_default = borrow_with_default,
    map_spec_get = spec_get,
    map_spec_set = spec_set,
    map_spec_del = spec_remove,
@@ -1472,13 +1451,14 @@ map_spec_has_key = spec_contains;

### Function `destroy`


-
public fun destroy<K: drop, V: drop>(table: smart_table::SmartTable<K, V>)
+
public fun destroy<K: drop, V: drop>(self: smart_table::SmartTable<K, V>)
 
pragma verify = false;
+pragma opaque;
 
@@ -1488,13 +1468,14 @@ map_spec_has_key = spec_contains;

### Function `clear`


-
public fun clear<K: drop, V: drop>(table: &mut smart_table::SmartTable<K, V>)
+
public fun clear<K: drop, V: drop>(self: &mut smart_table::SmartTable<K, V>)
 
pragma verify = false;
+pragma opaque;
 
@@ -1504,7 +1485,7 @@ map_spec_has_key = spec_contains;

### Function `add_all`


-
public fun add_all<K, V>(table: &mut smart_table::SmartTable<K, V>, keys: vector<K>, values: vector<V>)
+
public fun add_all<K, V>(self: &mut smart_table::SmartTable<K, V>, keys: vector<K>, values: vector<V>)
 
@@ -1520,7 +1501,7 @@ map_spec_has_key = spec_contains;

### Function `to_simple_map`


-
public fun to_simple_map<K: copy, drop, store, V: copy, store>(table: &smart_table::SmartTable<K, V>): simple_map::SimpleMap<K, V>
+
public fun to_simple_map<K: copy, drop, store, V: copy, store>(self: &smart_table::SmartTable<K, V>): simple_map::SimpleMap<K, V>
 
@@ -1536,7 +1517,7 @@ map_spec_has_key = spec_contains;

### Function `keys`


-
public fun keys<K: copy, drop, store, V: copy, store>(table_ref: &smart_table::SmartTable<K, V>): vector<K>
+
public fun keys<K: copy, drop, store, V: copy, store>(self: &smart_table::SmartTable<K, V>): vector<K>
 
@@ -1552,7 +1533,7 @@ map_spec_has_key = spec_contains;

### Function `keys_paginated`


-
public fun keys_paginated<K: copy, drop, store, V: copy, store>(table_ref: &smart_table::SmartTable<K, V>, starting_bucket_index: u64, starting_vector_index: u64, num_keys_to_get: u64): (vector<K>, option::Option<u64>, option::Option<u64>)
+
public fun keys_paginated<K: copy, drop, store, V: copy, store>(self: &smart_table::SmartTable<K, V>, starting_bucket_index: u64, starting_vector_index: u64, num_keys_to_get: u64): (vector<K>, option::Option<u64>, option::Option<u64>)
 
@@ -1568,7 +1549,7 @@ map_spec_has_key = spec_contains;

### Function `split_one_bucket`


-
fun split_one_bucket<K, V>(table: &mut smart_table::SmartTable<K, V>)
+
fun split_one_bucket<K, V>(self: &mut smart_table::SmartTable<K, V>)
 
@@ -1600,7 +1581,7 @@ map_spec_has_key = spec_contains;

### Function `borrow_with_default`


-
public fun borrow_with_default<K: copy, drop, V>(table: &smart_table::SmartTable<K, V>, key: K, default: &V): &V
+
public fun borrow_with_default<K: copy, drop, V>(self: &smart_table::SmartTable<K, V>, key: K, default: &V): &V
 
@@ -1616,7 +1597,7 @@ map_spec_has_key = spec_contains;

### Function `load_factor`


-
public fun load_factor<K, V>(table: &smart_table::SmartTable<K, V>): u64
+
public fun load_factor<K, V>(self: &smart_table::SmartTable<K, V>): u64
 
@@ -1632,7 +1613,7 @@ map_spec_has_key = spec_contains;

### Function `update_split_load_threshold`


-
public fun update_split_load_threshold<K, V>(table: &mut smart_table::SmartTable<K, V>, split_load_threshold: u8)
+
public fun update_split_load_threshold<K, V>(self: &mut smart_table::SmartTable<K, V>, split_load_threshold: u8)
 
@@ -1648,7 +1629,7 @@ map_spec_has_key = spec_contains;

### Function `update_target_bucket_size`


-
public fun update_target_bucket_size<K, V>(table: &mut smart_table::SmartTable<K, V>, target_bucket_size: u64)
+
public fun update_target_bucket_size<K, V>(self: &mut smart_table::SmartTable<K, V>, target_bucket_size: u64)
 
@@ -1664,7 +1645,7 @@ map_spec_has_key = spec_contains;

### Function `borrow_kv`


-
public fun borrow_kv<K, V>(e: &smart_table::Entry<K, V>): (&K, &V)
+
public fun borrow_kv<K, V>(self: &smart_table::Entry<K, V>): (&K, &V)
 
@@ -1680,7 +1661,7 @@ map_spec_has_key = spec_contains;

### Function `borrow_kv_mut`


-
public fun borrow_kv_mut<K, V>(e: &mut smart_table::Entry<K, V>): (&mut K, &mut V)
+
public fun borrow_kv_mut<K, V>(self: &mut smart_table::Entry<K, V>): (&mut K, &mut V)
 
@@ -1696,7 +1677,7 @@ map_spec_has_key = spec_contains;

### Function `num_buckets`


-
public fun num_buckets<K, V>(table: &smart_table::SmartTable<K, V>): u64
+
public fun num_buckets<K, V>(self: &smart_table::SmartTable<K, V>): u64
 
@@ -1712,7 +1693,7 @@ map_spec_has_key = spec_contains;

### Function `borrow_buckets`


-
public fun borrow_buckets<K, V>(table: &smart_table::SmartTable<K, V>): &table_with_length::TableWithLength<u64, vector<smart_table::Entry<K, V>>>
+
public fun borrow_buckets<K, V>(self: &smart_table::SmartTable<K, V>): &table_with_length::TableWithLength<u64, vector<smart_table::Entry<K, V>>>
 
@@ -1728,7 +1709,7 @@ map_spec_has_key = spec_contains;

### Function `borrow_buckets_mut`


-
public fun borrow_buckets_mut<K, V>(table: &mut smart_table::SmartTable<K, V>): &mut table_with_length::TableWithLength<u64, vector<smart_table::Entry<K, V>>>
+
public fun borrow_buckets_mut<K, V>(self: &mut smart_table::SmartTable<K, V>): &mut table_with_length::TableWithLength<u64, vector<smart_table::Entry<K, V>>>
 
diff --git a/aptos-move/framework/aptos-stdlib/doc/smart_vector.md b/aptos-move/framework/aptos-stdlib/doc/smart_vector.md
index 45c5e9ddf75ed..3343c751729c4 100644
--- a/aptos-move/framework/aptos-stdlib/doc/smart_vector.md
+++ b/aptos-move/framework/aptos-stdlib/doc/smart_vector.md
@@ -50,6 +50,7 @@
- [Struct `SmartVector`](#@Specification_1_SmartVector)
- [Function `empty`](#@Specification_1_empty)
- [Function `empty_with_config`](#@Specification_1_empty_with_config)
+ - [Function `singleton`](#@Specification_1_singleton)
- [Function `destroy_empty`](#@Specification_1_destroy_empty)
- [Function `borrow`](#@Specification_1_borrow)
- [Function `append`](#@Specification_1_append)
@@ -284,7 +285,7 @@ Create a vector of length 1 containing the passed in T.
public fun singleton<T: store>(element: T): SmartVector<T> {
     let v = empty();
-    push_back(&mut v, element);
+    v.push_back(element);
     v
 }
 
@@ -297,11 +298,11 @@ Create a vector of length 1 containing the passed in T.

## Function `destroy_empty`

-Destroy the vector v.
-Aborts if v is not empty.
+Destroy the vector self.
+Aborts if self is not empty.


-
public fun destroy_empty<T>(v: smart_vector::SmartVector<T>)
+
public fun destroy_empty<T>(self: smart_vector::SmartVector<T>)
 
@@ -310,11 +311,11 @@ Aborts if v is not empty. Implementation -
public fun destroy_empty<T>(v: SmartVector<T>) {
-    assert!(is_empty(&v), error::invalid_argument(EVECTOR_NOT_EMPTY));
-    let SmartVector { inline_vec, big_vec, inline_capacity: _, bucket_size: _ } = v;
-    vector::destroy_empty(inline_vec);
-    option::destroy_none(big_vec);
+
public fun destroy_empty<T>(self: SmartVector<T>) {
+    assert!(self.is_empty(), error::invalid_argument(EVECTOR_NOT_EMPTY));
+    let SmartVector { inline_vec, big_vec, inline_capacity: _, bucket_size: _ } = self;
+    inline_vec.destroy_empty();
+    big_vec.destroy_none();
 }
 
@@ -329,7 +330,7 @@ Aborts if v is not empty. Destroy a vector completely when T has drop. -
public fun destroy<T: drop>(v: smart_vector::SmartVector<T>)
+
public fun destroy<T: drop>(self: smart_vector::SmartVector<T>)
 
@@ -338,9 +339,9 @@ Destroy a vector completely when T has drop. Implementation -
public fun destroy<T: drop>(v: SmartVector<T>) {
-    clear(&mut v);
-    destroy_empty(v);
+
public fun destroy<T: drop>(self: SmartVector<T>) {
+    self.clear();
+    self.destroy_empty();
 }
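
A corresponding receiver-style sketch for SmartVector (hypothetical test-only module with use aptos_std::smart_vector; the values are illustrative and the sketch again assumes Move 2 receiver syntax):

#[test]
fun smart_vector_basics() {
    let v = smart_vector::singleton(1u64);
    v.push_back(2);
    assert!(v.length() == 2, 0);
    assert!(v.pop_back() == 2, 1);
    v.destroy();               // clear() followed by destroy_empty(), as above
}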
 
@@ -355,7 +356,7 @@ Destroy a vector completely when T has drop. Clear a vector completely when T has drop. -
public fun clear<T: drop>(v: &mut smart_vector::SmartVector<T>)
+
public fun clear<T: drop>(self: &mut smart_vector::SmartVector<T>)
 
@@ -364,10 +365,10 @@ Clear a vector completely when T has drop. Implementation -
public fun clear<T: drop>(v: &mut SmartVector<T>) {
-    v.inline_vec = vector[];
-    if (option::is_some(&v.big_vec)) {
-        big_vector::destroy(option::extract(&mut v.big_vec));
+
public fun clear<T: drop>(self: &mut SmartVector<T>) {
+    self.inline_vec = vector[];
+    if (self.big_vec.is_some()) {
+        self.big_vec.extract().destroy();
     }
 }
 
@@ -380,11 +381,11 @@ Clear a vector completely when T has drop.

## Function `borrow`

-Acquire an immutable reference to the ith T of the vector v.
+Acquire an immutable reference to the ith T of the vector self.
Aborts if i is out of bounds.


-
public fun borrow<T>(v: &smart_vector::SmartVector<T>, i: u64): &T
+
public fun borrow<T>(self: &smart_vector::SmartVector<T>, i: u64): &T
 
@@ -393,13 +394,13 @@ Aborts if i is out of bounds. Implementation -
public fun borrow<T>(v: &SmartVector<T>, i: u64): &T {
-    assert!(i < length(v), error::invalid_argument(EINDEX_OUT_OF_BOUNDS));
-    let inline_len = vector::length(&v.inline_vec);
+
public fun borrow<T>(self: &SmartVector<T>, i: u64): &T {
+    assert!(i < self.length(), error::invalid_argument(EINDEX_OUT_OF_BOUNDS));
+    let inline_len = self.inline_vec.length();
     if (i < inline_len) {
-        vector::borrow(&v.inline_vec, i)
+        self.inline_vec.borrow(i)
     } else {
-        big_vector::borrow(option::borrow(&v.big_vec), i - inline_len)
+        self.big_vec.borrow().borrow(i - inline_len)
     }
 }
 
@@ -412,11 +413,11 @@ Aborts if i is out of bounds.

## Function `borrow_mut`

-Return a mutable reference to the ith T in the vector v.
+Return a mutable reference to the ith T in the vector self.
Aborts if i is out of bounds.


-
public fun borrow_mut<T>(v: &mut smart_vector::SmartVector<T>, i: u64): &mut T
+
public fun borrow_mut<T>(self: &mut smart_vector::SmartVector<T>, i: u64): &mut T
 
@@ -425,13 +426,13 @@ Aborts if i is out of bounds. Implementation -
public fun borrow_mut<T>(v: &mut SmartVector<T>, i: u64): &mut T {
-    assert!(i < length(v), error::invalid_argument(EINDEX_OUT_OF_BOUNDS));
-    let inline_len = vector::length(&v.inline_vec);
+
public fun borrow_mut<T>(self: &mut SmartVector<T>, i: u64): &mut T {
+    assert!(i < self.length(), error::invalid_argument(EINDEX_OUT_OF_BOUNDS));
+    let inline_len = self.inline_vec.length();
     if (i < inline_len) {
-        vector::borrow_mut(&mut v.inline_vec, i)
+        self.inline_vec.borrow_mut(i)
     } else {
-        big_vector::borrow_mut(option::borrow_mut(&mut v.big_vec), i - inline_len)
+        self.big_vec.borrow_mut().borrow_mut(i - inline_len)
     }
 }
 
@@ -444,12 +445,12 @@ Aborts if i is out of bounds.

## Function `append`

-Empty and destroy the other vector, and push each of the Ts in the other vector onto the lhs vector in the
+Empty and destroy the other vector, and push each of the Ts in the other vector onto the self vector in the
same order as they occurred in other.
Disclaimer: This function may be costly. Use it at your own discretion.


-
public fun append<T: store>(lhs: &mut smart_vector::SmartVector<T>, other: smart_vector::SmartVector<T>)
+
public fun append<T: store>(self: &mut smart_vector::SmartVector<T>, other: smart_vector::SmartVector<T>)
 
@@ -458,19 +459,19 @@ Disclaimer: This function may be costly. Use it at your own discretion. Implementation -
public fun append<T: store>(lhs: &mut SmartVector<T>, other: SmartVector<T>) {
-    let other_len = length(&other);
+
public fun append<T: store>(self: &mut SmartVector<T>, other: SmartVector<T>) {
+    let other_len = other.length();
     let half_other_len = other_len / 2;
     let i = 0;
     while (i < half_other_len) {
-        push_back(lhs, swap_remove(&mut other, i));
-        i = i + 1;
+        self.push_back(other.swap_remove(i));
+        i += 1;
     };
     while (i < other_len) {
-        push_back(lhs, pop_back(&mut other));
-        i = i + 1;
+        self.push_back(other.pop_back());
+        i += 1;
     };
-    destroy_empty(other);
+    other.destroy_empty();
 }
 
@@ -485,7 +486,7 @@ Disclaimer: This function may be costly. Use it at your own discretion. Add multiple values to the vector at once. -
public fun add_all<T: store>(v: &mut smart_vector::SmartVector<T>, vals: vector<T>)
+
public fun add_all<T: store>(self: &mut smart_vector::SmartVector<T>, vals: vector<T>)
 
@@ -494,8 +495,8 @@ Add multiple values to the vector at once. Implementation -
public fun add_all<T: store>(v: &mut SmartVector<T>, vals: vector<T>) {
-    vector::for_each(vals, |val| { push_back(v, val); })
+
public fun add_all<T: store>(self: &mut SmartVector<T>, vals: vector<T>) {
+    vals.for_each(|val| { self.push_back(val); })
 }
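
Sketch of `add_all` in the same hypothetical test module; the values are pushed back one by one in order:

#[test]
fun add_all_example() {
    let v = smart_vector::new<u64>();
    v.add_all(vector[1, 2, 3]);
    assert!(v.length() == 3, 0);
    assert!(*v.borrow(2) == 3, 1);
    v.destroy();
}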
 
@@ -512,7 +513,7 @@ atomic view of the whole vector. Disclaimer: This function may be costly as the smart vector may be huge in size. Use it at your own discretion. -
public fun to_vector<T: copy, store>(v: &smart_vector::SmartVector<T>): vector<T>
+
public fun to_vector<T: copy, store>(self: &smart_vector::SmartVector<T>): vector<T>
 
@@ -521,11 +522,11 @@ Disclaimer: This function may be costly as the smart vector may be huge in size. Implementation -
public fun to_vector<T: store + copy>(v: &SmartVector<T>): vector<T> {
-    let res = v.inline_vec;
-    if (option::is_some(&v.big_vec)) {
-        let big_vec = option::borrow(&v.big_vec);
-        vector::append(&mut res, big_vector::to_vector(big_vec));
+
public fun to_vector<T: store + copy>(self: &SmartVector<T>): vector<T> {
+    let res = self.inline_vec;
+    if (self.big_vec.is_some()) {
+        let big_vec = self.big_vec.borrow();
+        res.append(big_vec.to_vector());
     };
     res
 }
@@ -539,11 +540,11 @@ Disclaimer: This function may be costly as the smart vector may be huge in size.
 
 ## Function `push_back`
 
-Add T val to the end of the vector v. It grows the buckets when the current buckets are full.
+Add T val to the end of the vector self. It grows the buckets when the current buckets are full.
 This operation will cost more gas when it adds new bucket.
 
 
-
public fun push_back<T: store>(v: &mut smart_vector::SmartVector<T>, val: T)
+
public fun push_back<T: store>(self: &mut smart_vector::SmartVector<T>, val: T)
 
@@ -552,28 +553,28 @@ This operation will cost more gas when it adds new bucket. Implementation -
public fun push_back<T: store>(v: &mut SmartVector<T>, val: T) {
-    let len = length(v);
-    let inline_len = vector::length(&v.inline_vec);
+
public fun push_back<T: store>(self: &mut SmartVector<T>, val: T) {
+    let len = self.length();
+    let inline_len = self.inline_vec.length();
     if (len == inline_len) {
-        let bucket_size = if (option::is_some(&v.inline_capacity)) {
-            if (len < *option::borrow(&v.inline_capacity)) {
-                vector::push_back(&mut v.inline_vec, val);
+        let bucket_size = if (self.inline_capacity.is_some()) {
+            if (len < *self.inline_capacity.borrow()) {
+                self.inline_vec.push_back(val);
                 return
             };
-            *option::borrow(&v.bucket_size)
+            *self.bucket_size.borrow()
         } else {
             let val_size = size_of_val(&val);
             if (val_size * (inline_len + 1) < 150 /* magic number */) {
-                vector::push_back(&mut v.inline_vec, val);
+                self.inline_vec.push_back(val);
                 return
             };
-            let estimated_avg_size = max((size_of_val(&v.inline_vec) + val_size) / (inline_len + 1), 1);
+            let estimated_avg_size = max((size_of_val(&self.inline_vec) + val_size) / (inline_len + 1), 1);
             max(1024 /* free_write_quota */ / estimated_avg_size, 1)
         };
-        option::fill(&mut v.big_vec, big_vector::empty(bucket_size));
+        self.big_vec.fill(big_vector::empty(bucket_size));
     };
-    big_vector::push_back(option::borrow_mut(&mut v.big_vec), val);
+    self.big_vec.borrow_mut().push_back(val);
 }
 
@@ -585,11 +586,11 @@ This operation will cost more gas when it adds new bucket.

## Function `pop_back`

-Pop an T from the end of vector v. It does shrink the buckets if they're empty.
-Aborts if v is empty.
+Pop an T from the end of vector self. It does shrink the buckets if they're empty.
+Aborts if self is empty.


-
public fun pop_back<T>(v: &mut smart_vector::SmartVector<T>): T
+
public fun pop_back<T>(self: &mut smart_vector::SmartVector<T>): T
 
@@ -598,20 +599,20 @@ Aborts if v is empty. Implementation -
public fun pop_back<T>(v: &mut SmartVector<T>): T {
-    assert!(!is_empty(v), error::invalid_state(EVECTOR_EMPTY));
-    let big_vec_wrapper = &mut v.big_vec;
-    if (option::is_some(big_vec_wrapper)) {
-        let big_vec = option::extract(big_vec_wrapper);
-        let val = big_vector::pop_back(&mut big_vec);
-        if (big_vector::is_empty(&big_vec)) {
-            big_vector::destroy_empty(big_vec)
+
public fun pop_back<T>(self: &mut SmartVector<T>): T {
+    assert!(!self.is_empty(), error::invalid_state(EVECTOR_EMPTY));
+    let big_vec_wrapper = &mut self.big_vec;
+    if (big_vec_wrapper.is_some()) {
+        let big_vec = big_vec_wrapper.extract();
+        let val = big_vec.pop_back();
+        if (big_vec.is_empty()) {
+            big_vec.destroy_empty()
         } else {
-            option::fill(big_vec_wrapper, big_vec);
+            big_vec_wrapper.fill(big_vec);
         };
         val
     } else {
-        vector::pop_back(&mut v.inline_vec)
+        self.inline_vec.pop_back()
     }
 }
 
@@ -624,12 +625,12 @@ Aborts if v is empty.

## Function `remove`

-Remove the T at index i in the vector v and return the owned value that was previously stored at i in v.
+Remove the T at index i in the vector self and return the owned value that was previously stored at i in self.
All Ts occurring at indices greater than i will be shifted down by 1.
Will abort if i is out of bounds.
Disclaimer: This function may be costly. Use it at your own discretion.


-
public fun remove<T>(v: &mut smart_vector::SmartVector<T>, i: u64): T
+
public fun remove<T>(self: &mut smart_vector::SmartVector<T>, i: u64): T
 
@@ -638,20 +639,20 @@ Disclaimer: This function may be costly. Use it at your own discretion. Implementation -
public fun remove<T>(v: &mut SmartVector<T>, i: u64): T {
-    let len = length(v);
+
public fun remove<T>(self: &mut SmartVector<T>, i: u64): T {
+    let len = self.length();
     assert!(i < len, error::invalid_argument(EINDEX_OUT_OF_BOUNDS));
-    let inline_len = vector::length(&v.inline_vec);
+    let inline_len = self.inline_vec.length();
     if (i < inline_len) {
-        vector::remove(&mut v.inline_vec, i)
+        self.inline_vec.remove(i)
     } else {
-        let big_vec_wrapper = &mut v.big_vec;
-        let big_vec = option::extract(big_vec_wrapper);
-        let val = big_vector::remove(&mut big_vec, i - inline_len);
-        if (big_vector::is_empty(&big_vec)) {
-            big_vector::destroy_empty(big_vec)
+        let big_vec_wrapper = &mut self.big_vec;
+        let big_vec = big_vec_wrapper.extract();
+        let val = big_vec.remove(i - inline_len);
+        if (big_vec.is_empty()) {
+            big_vec.destroy_empty()
         } else {
-            option::fill(big_vec_wrapper, big_vec);
+            big_vec_wrapper.fill(big_vec);
         };
         val
     }
@@ -666,12 +667,12 @@ Disclaimer: This function may be costly. Use it at your own discretion.
 
 ## Function `swap_remove`
 
-Swap the ith T of the vector v with the last T and then pop the vector.
+Swap the ith T of the vector self with the last T and then pop the vector.
 This is O(1), but does not preserve ordering of Ts in the vector.
 Aborts if i is out of bounds.
 
 
-
public fun swap_remove<T>(v: &mut smart_vector::SmartVector<T>, i: u64): T
+
public fun swap_remove<T>(self: &mut smart_vector::SmartVector<T>, i: u64): T
 
@@ -680,33 +681,33 @@ Aborts if i is out of bounds. Implementation -
public fun swap_remove<T>(v: &mut SmartVector<T>, i: u64): T {
-    let len = length(v);
+
public fun swap_remove<T>(self: &mut SmartVector<T>, i: u64): T {
+    let len = self.length();
     assert!(i < len, error::invalid_argument(EINDEX_OUT_OF_BOUNDS));
-    let inline_len = vector::length(&v.inline_vec);
-    let big_vec_wrapper = &mut v.big_vec;
-    let inline_vec = &mut v.inline_vec;
+    let inline_len = self.inline_vec.length();
+    let big_vec_wrapper = &mut self.big_vec;
+    let inline_vec = &mut self.inline_vec;
     if (i >= inline_len) {
-        let big_vec = option::extract(big_vec_wrapper);
-        let val = big_vector::swap_remove(&mut big_vec, i - inline_len);
-        if (big_vector::is_empty(&big_vec)) {
-            big_vector::destroy_empty(big_vec)
+        let big_vec = big_vec_wrapper.extract();
+        let val = big_vec.swap_remove(i - inline_len);
+        if (big_vec.is_empty()) {
+            big_vec.destroy_empty()
         } else {
-            option::fill(big_vec_wrapper, big_vec);
+            big_vec_wrapper.fill(big_vec);
         };
         val
     } else {
         if (inline_len < len) {
-            let big_vec = option::extract(big_vec_wrapper);
-            let last_from_big_vec = big_vector::pop_back(&mut big_vec);
-            if (big_vector::is_empty(&big_vec)) {
-                big_vector::destroy_empty(big_vec)
+            let big_vec = big_vec_wrapper.extract();
+            let last_from_big_vec = big_vec.pop_back();
+            if (big_vec.is_empty()) {
+                big_vec.destroy_empty()
             } else {
-                option::fill(big_vec_wrapper, big_vec);
+                big_vec_wrapper.fill(big_vec);
             };
-            vector::push_back(inline_vec, last_from_big_vec);
+            inline_vec.push_back(last_from_big_vec);
         };
-        vector::swap_remove(inline_vec, i)
+        inline_vec.swap_remove(i)
     }
 }
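
Sketch of the O(1) but order-destroying behaviour of `swap_remove` (same assumptions as the earlier SmartVector sketch):

#[test]
fun swap_remove_example() {
    let v = smart_vector::new<u64>();
    v.add_all(vector[10, 20, 30, 40]);
    assert!(v.swap_remove(1) == 20, 0);   // slot 1 now holds the former last element
    assert!(*v.borrow(1) == 40, 1);
    assert!(v.length() == 3, 2);
    v.destroy();
}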
 
@@ -720,10 +721,10 @@ Aborts if i is out of bounds.

## Function `swap`

Swap the Ts at the i'th and j'th indices in the vector v. Will abort if either of i or j are out of bounds
-for v.
+for self.


-
public fun swap<T: store>(v: &mut smart_vector::SmartVector<T>, i: u64, j: u64)
+
public fun swap<T: store>(self: &mut smart_vector::SmartVector<T>, i: u64, j: u64)
 
@@ -732,26 +733,26 @@ for v. Implementation -
public fun swap<T: store>(v: &mut SmartVector<T>, i: u64, j: u64) {
+
public fun swap<T: store>(self: &mut SmartVector<T>, i: u64, j: u64) {
     if (i > j) {
-        return swap(v, j, i)
+        return self.swap(j, i)
     };
-    let len = length(v);
+    let len = self.length();
     assert!(j < len, error::invalid_argument(EINDEX_OUT_OF_BOUNDS));
-    let inline_len = vector::length(&v.inline_vec);
+    let inline_len = self.inline_vec.length();
     if (i >= inline_len) {
-        big_vector::swap(option::borrow_mut(&mut v.big_vec), i - inline_len, j - inline_len);
+        self.big_vec.borrow_mut().swap(i - inline_len, j - inline_len);
     } else if (j < inline_len) {
-        vector::swap(&mut v.inline_vec, i, j);
+        self.inline_vec.swap(i, j);
     } else {
-        let big_vec = option::borrow_mut(&mut v.big_vec);
-        let inline_vec = &mut v.inline_vec;
-        let element_i = vector::swap_remove(inline_vec, i);
-        let element_j = big_vector::swap_remove(big_vec, j - inline_len);
-        vector::push_back(inline_vec, element_j);
-        vector::swap(inline_vec, i, inline_len - 1);
-        big_vector::push_back(big_vec, element_i);
-        big_vector::swap(big_vec, j - inline_len, len - inline_len - 1);
+        let big_vec = self.big_vec.borrow_mut();
+        let inline_vec = &mut self.inline_vec;
+        let element_i = inline_vec.swap_remove(i);
+        let element_j = big_vec.swap_remove(j - inline_len);
+        inline_vec.push_back(element_j);
+        inline_vec.swap(i, inline_len - 1);
+        big_vec.push_back(element_i);
+        big_vec.swap(j - inline_len, len - inline_len - 1);
     }
 }
 
@@ -764,11 +765,11 @@ for v.

## Function `reverse`

-Reverse the order of the Ts in the vector v in-place.
+Reverse the order of the Ts in the vector self in-place.
Disclaimer: This function may be costly. Use it at your own discretion.


-
public fun reverse<T: store>(v: &mut smart_vector::SmartVector<T>)
+
public fun reverse<T: store>(self: &mut smart_vector::SmartVector<T>)
 
@@ -777,35 +778,33 @@ Disclaimer: This function may be costly. Use it at your own discretion. Implementation -
public fun reverse<T: store>(v: &mut SmartVector<T>) {
-    let inline_len = vector::length(&v.inline_vec);
-    let i = 0;
+
public fun reverse<T: store>(self: &mut SmartVector<T>) {
+    let inline_len = self.inline_vec.length();
     let new_inline_vec = vector[];
     // Push the last `inline_len` Ts into a temp vector.
-    while (i < inline_len) {
-        vector::push_back(&mut new_inline_vec, pop_back(v));
-        i = i + 1;
+    for (i in 0..inline_len) {
+        new_inline_vec.push_back(self.pop_back());
     };
-    vector::reverse(&mut new_inline_vec);
+    new_inline_vec.reverse();
     // Reverse the big_vector left if exists.
-    if (option::is_some(&v.big_vec)) {
-        big_vector::reverse(option::borrow_mut(&mut v.big_vec));
+    if (self.big_vec.is_some()) {
+        self.big_vec.borrow_mut().reverse();
     };
     // Mem::swap the two vectors.
     let temp_vec = vector[];
-    while (!vector::is_empty(&mut v.inline_vec)) {
-        vector::push_back(&mut temp_vec, vector::pop_back(&mut v.inline_vec));
+    while (!self.inline_vec.is_empty()) {
+        temp_vec.push_back(self.inline_vec.pop_back());
     };
-    vector::reverse(&mut temp_vec);
-    while (!vector::is_empty(&mut new_inline_vec)) {
-        vector::push_back(&mut v.inline_vec, vector::pop_back(&mut new_inline_vec));
+    temp_vec.reverse();
+    while (!new_inline_vec.is_empty()) {
+        self.inline_vec.push_back(new_inline_vec.pop_back());
     };
-    vector::destroy_empty(new_inline_vec);
+    new_inline_vec.destroy_empty();
     // Push the rest Ts originally left in inline_vector back to the end of the smart vector.
-    while (!vector::is_empty(&mut temp_vec)) {
-        push_back(v, vector::pop_back(&mut temp_vec));
+    while (!temp_vec.is_empty()) {
+        self.push_back(temp_vec.pop_back());
     };
-    vector::destroy_empty(temp_vec);
+    temp_vec.destroy_empty();
 }
 
@@ -817,12 +816,12 @@ Disclaimer: This function may be costly. Use it at your own discretion.

## Function `index_of`

-Return (true, i) if val is in the vector v at index i.
+Return (true, i) if val is in the vector self at index i.
Otherwise, returns (false, 0).
Disclaimer: This function may be costly. Use it at your own discretion.


-
public fun index_of<T>(v: &smart_vector::SmartVector<T>, val: &T): (bool, u64)
+
public fun index_of<T>(self: &smart_vector::SmartVector<T>, val: &T): (bool, u64)
 
@@ -831,13 +830,13 @@ Disclaimer: This function may be costly. Use it at your own discretion. Implementation -
public fun index_of<T>(v: &SmartVector<T>, val: &T): (bool, u64) {
-    let (found, i) = vector::index_of(&v.inline_vec, val);
+
public fun index_of<T>(self: &SmartVector<T>, val: &T): (bool, u64) {
+    let (found, i) = self.inline_vec.index_of(val);
     if (found) {
         (true, i)
-    } else if (option::is_some(&v.big_vec)) {
-        let (found, i) = big_vector::index_of(option::borrow(&v.big_vec), val);
-        (found, i + vector::length(&v.inline_vec))
+    } else if (self.big_vec.is_some()) {
+        let (found, i) = self.big_vec.borrow().index_of(val);
+        (found, i + self.inline_vec.length())
     } else {
         (false, 0)
     }
@@ -852,11 +851,11 @@ Disclaimer: This function may be costly. Use it at your own discretion.
 
 ## Function `contains`
 
-Return true if val is in the vector v.
+Return true if val is in the vector self.
 Disclaimer: This function may be costly. Use it at your own discretion.
 
 
-
public fun contains<T>(v: &smart_vector::SmartVector<T>, val: &T): bool
+
public fun contains<T>(self: &smart_vector::SmartVector<T>, val: &T): bool
 
@@ -865,9 +864,9 @@ Disclaimer: This function may be costly. Use it at your own discretion. Implementation -
public fun contains<T>(v: &SmartVector<T>, val: &T): bool {
-    if (is_empty(v)) return false;
-    let (exist, _) = index_of(v, val);
+
public fun contains<T>(self: &SmartVector<T>, val: &T): bool {
+    if (self.is_empty()) return false;
+    let (exist, _) = self.index_of(val);
     exist
 }
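
Sketch of `index_of` and `contains` under the same assumptions; both consult the inline part first and then fall through to the big vector:

#[test]
fun index_of_contains_example() {
    let v = smart_vector::new<u64>();
    v.add_all(vector[5, 6, 7]);
    let (found, i) = v.index_of(&6);
    assert!(found && i == 1, 0);
    assert!(v.contains(&7) && !v.contains(&9), 1);
    v.destroy();
}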
 
@@ -883,7 +882,7 @@ Disclaimer: This function may be costly. Use it at your own discretion. Return the length of the vector. -
public fun length<T>(v: &smart_vector::SmartVector<T>): u64
+
public fun length<T>(self: &smart_vector::SmartVector<T>): u64
 
@@ -892,11 +891,11 @@ Return the length of the vector. Implementation -
public fun length<T>(v: &SmartVector<T>): u64 {
-    vector::length(&v.inline_vec) + if (option::is_none(&v.big_vec)) {
+
public fun length<T>(self: &SmartVector<T>): u64 {
+    self.inline_vec.length() + if (self.big_vec.is_none()) {
         0
     } else {
-        big_vector::length(option::borrow(&v.big_vec))
+        self.big_vec.borrow().length()
     }
 }
 
@@ -909,10 +908,10 @@ Return the length of the vector.

## Function `is_empty`

-Return true if the vector v has no Ts and false otherwise.
+Return true if the vector self has no Ts and false otherwise.


-
public fun is_empty<T>(v: &smart_vector::SmartVector<T>): bool
+
public fun is_empty<T>(self: &smart_vector::SmartVector<T>): bool
 
@@ -921,8 +920,8 @@ Return true if the vector v has no Ts and Implementation -
public fun is_empty<T>(v: &SmartVector<T>): bool {
-    length(v) == 0
+
public fun is_empty<T>(self: &SmartVector<T>): bool {
+    self.length() == 0
 }
 
@@ -937,7 +936,7 @@ Return true if the vector v has no Ts and public fun for_each<T: store>(v: smart_vector::SmartVector<T>, f: |T|) +
public fun for_each<T: store>(self: smart_vector::SmartVector<T>, f: |T|)
 
@@ -946,9 +945,9 @@ Apply the function to each T in the vector, consuming it. Implementation -
public inline fun for_each<T: store>(v: SmartVector<T>, f: |T|) {
-    aptos_std::smart_vector::reverse(&mut v); // We need to reverse the vector to consume it efficiently
-    aptos_std::smart_vector::for_each_reverse(v, |e| f(e));
+
public inline fun for_each<T: store>(self: SmartVector<T>, f: |T|) {
+    self.reverse(); // We need to reverse the vector to consume it efficiently
+    self.for_each_reverse(|e| f(e));
 }
 
@@ -963,7 +962,7 @@ Apply the function to each T in the vector, consuming it. Apply the function to each T in the vector, consuming it. -
public fun for_each_reverse<T>(v: smart_vector::SmartVector<T>, f: |T|)
+
public fun for_each_reverse<T>(self: smart_vector::SmartVector<T>, f: |T|)
 
@@ -972,13 +971,13 @@ Apply the function to each T in the vector, consuming it. Implementation -
public inline fun for_each_reverse<T>(v: SmartVector<T>, f: |T|) {
-    let len = aptos_std::smart_vector::length(&v);
+
public inline fun for_each_reverse<T>(self: SmartVector<T>, f: |T|) {
+    let len = self.length();
     while (len > 0) {
-        f(aptos_std::smart_vector::pop_back(&mut v));
-        len = len - 1;
+        f(self.pop_back());
+        len -= 1;
     };
-    aptos_std::smart_vector::destroy_empty(v)
+    self.destroy_empty()
 }
 
@@ -993,7 +992,7 @@ Apply the function to each T in the vector, consuming it. Apply the function to a reference of each T in the vector. -
public fun for_each_ref<T>(v: &smart_vector::SmartVector<T>, f: |&T|)
+
public fun for_each_ref<T>(self: &smart_vector::SmartVector<T>, f: |&T|)
 
@@ -1002,12 +1001,10 @@ Apply the function to a reference of each T in the vector. Implementation -
public inline fun for_each_ref<T>(v: &SmartVector<T>, f: |&T|) {
-    let i = 0;
-    let len = aptos_std::smart_vector::length(v);
-    while (i < len) {
-        f(aptos_std::smart_vector::borrow(v, i));
-        i = i + 1
+
public inline fun for_each_ref<T>(self: &SmartVector<T>, f: |&T|) {
+    let len = self.length();
+    for (i in 0..len) {
+        f(self.borrow(i));
     }
 }
 
@@ -1023,7 +1020,7 @@ Apply the function to a reference of each T in the vector. Apply the function to a mutable reference to each T in the vector. -
public fun for_each_mut<T>(v: &mut smart_vector::SmartVector<T>, f: |&mut T|)
+
public fun for_each_mut<T>(self: &mut smart_vector::SmartVector<T>, f: |&mut T|)
 
@@ -1032,12 +1029,10 @@ Apply the function to a mutable reference to each T in the vector. Implementation -
public inline fun for_each_mut<T>(v: &mut SmartVector<T>, f: |&mut T|) {
-    let i = 0;
-    let len = aptos_std::smart_vector::length(v);
-    while (i < len) {
-        f(aptos_std::smart_vector::borrow_mut(v, i));
-        i = i + 1
+
public inline fun for_each_mut<T>(self: &mut SmartVector<T>, f: |&mut T|) {
+    let len = self.length();
+    for (i in 0..len) {
+        f(self.borrow_mut(i));
     }
 }
 
@@ -1053,7 +1048,7 @@ Apply the function to a mutable reference to each T in the vector. Apply the function to a reference of each T in the vector with its index. -
public fun enumerate_ref<T>(v: &smart_vector::SmartVector<T>, f: |(u64, &T)|)
+
public fun enumerate_ref<T>(self: &smart_vector::SmartVector<T>, f: |(u64, &T)|)
 
@@ -1062,12 +1057,10 @@ Apply the function to a reference of each T in the vector with its index. Implementation -
public inline fun enumerate_ref<T>(v: &SmartVector<T>, f: |u64, &T|) {
-    let i = 0;
-    let len = aptos_std::smart_vector::length(v);
-    while (i < len) {
-        f(i, aptos_std::smart_vector::borrow(v, i));
-        i = i + 1;
+
public inline fun enumerate_ref<T>(self: &SmartVector<T>, f: |u64, &T|) {
+    let len = self.length();
+    for (i in 0..len) {
+        f(i, self.borrow(i));
     };
 }
 
@@ -1083,7 +1076,7 @@ Apply the function to a reference of each T in the vector with its index. Apply the function to a mutable reference of each T in the vector with its index. -
public fun enumerate_mut<T>(v: &mut smart_vector::SmartVector<T>, f: |(u64, &mut T)|)
+
public fun enumerate_mut<T>(self: &mut smart_vector::SmartVector<T>, f: |(u64, &mut T)|)
 
@@ -1092,12 +1085,10 @@ Apply the function to a mutable reference of each T in the vector with its index Implementation -
public inline fun enumerate_mut<T>(v: &mut SmartVector<T>, f: |u64, &mut T|) {
-    let i = 0;
-    let len = length(v);
-    while (i < len) {
-        f(i, borrow_mut(v, i));
-        i = i + 1;
+
public inline fun enumerate_mut<T>(self: &mut SmartVector<T>, f: |u64, &mut T|) {
+    let len = self.length();
+    for (i in 0..len) {
+        f(i, self.borrow_mut(i));
     };
 }
 
@@ -1114,7 +1105,7 @@ Fold the function over the Ts. For example, fold<Accumulator, T: store>(v: smart_vector::SmartVector<T>, init: Accumulator, f: |(Accumulator, T)|Accumulator): Accumulator +
public fun fold<Accumulator, T: store>(self: smart_vector::SmartVector<T>, init: Accumulator, f: |(Accumulator, T)|Accumulator): Accumulator
 
@@ -1124,12 +1115,12 @@ Fold the function over the Ts. For example,

fold<Accumulator, T: store>(
-    v: SmartVector<T>,
+    self: SmartVector<T>,
    init: Accumulator,
    f: |Accumulator, T|Accumulator
): Accumulator {
    let accu = init;
-    aptos_std::smart_vector::for_each(v, |elem| accu = f(accu, elem));
+    self.for_each(|elem| accu = f(accu, elem));
    accu
}
@@ -1146,7 +1137,7 @@ Fold right like fold above but working right to left. For example,

f(1, f(2, f(3, 0)))


-
public fun foldr<Accumulator, T>(v: smart_vector::SmartVector<T>, init: Accumulator, f: |(T, Accumulator)|Accumulator): Accumulator
+
public fun foldr<Accumulator, T>(self: smart_vector::SmartVector<T>, init: Accumulator, f: |(T, Accumulator)|Accumulator): Accumulator
 
@@ -1156,12 +1147,12 @@ Fold right like fold above but working right to left. For example,

public inline fun foldr<Accumulator, T>(
-    v: SmartVector<T>,
+    self: SmartVector<T>,
    init: Accumulator,
    f: |T, Accumulator|Accumulator
): Accumulator {
    let accu = init;
-    aptos_std::smart_vector::for_each_reverse(v, |elem| accu = f(elem, accu));
+    self.for_each_reverse(|elem| accu = f(elem, accu));
    accu
}
@@ -1178,7 +1169,7 @@ Map the function over the references of the Ts of the vector, producing a new ve original vector. -
public fun map_ref<T1, T2: store>(v: &smart_vector::SmartVector<T1>, f: |&T1|T2): smart_vector::SmartVector<T2>
+
public fun map_ref<T1, T2: store>(self: &smart_vector::SmartVector<T1>, f: |&T1|T2): smart_vector::SmartVector<T2>
 
@@ -1188,11 +1179,11 @@ original vector.
public inline fun map_ref<T1, T2: store>(
-    v: &SmartVector<T1>,
+    self: &SmartVector<T1>,
     f: |&T1|T2
 ): SmartVector<T2> {
     let result = aptos_std::smart_vector::new<T2>();
-    aptos_std::smart_vector::for_each_ref(v, |elem| aptos_std::smart_vector::push_back(&mut result, f(elem)));
+    self.for_each_ref(|elem| result.push_back(f(elem)));
     result
 }
 
@@ -1208,7 +1199,7 @@ original vector. Map the function over the Ts of the vector, producing a new vector. -
public fun map<T1: store, T2: store>(v: smart_vector::SmartVector<T1>, f: |T1|T2): smart_vector::SmartVector<T2>
+
public fun map<T1: store, T2: store>(self: smart_vector::SmartVector<T1>, f: |T1|T2): smart_vector::SmartVector<T2>
 
@@ -1218,11 +1209,11 @@ Map the function over the Ts of the vector, producing a new vector.
public inline fun map<T1: store, T2: store>(
-    v: SmartVector<T1>,
+    self: SmartVector<T1>,
     f: |T1|T2
 ): SmartVector<T2> {
     let result = aptos_std::smart_vector::new<T2>();
-    aptos_std::smart_vector::for_each(v, |elem| push_back(&mut result, f(elem)));
+    self.for_each(|elem| result.push_back(f(elem)));
     result
 }
 
@@ -1238,7 +1229,7 @@ Map the function over the Ts of the vector, producing a new vector. Filter the vector using the boolean function, removing all Ts for which p(e) is not true. -
public fun filter<T: drop, store>(v: smart_vector::SmartVector<T>, p: |&T|bool): smart_vector::SmartVector<T>
+
public fun filter<T: drop, store>(self: smart_vector::SmartVector<T>, p: |&T|bool): smart_vector::SmartVector<T>
 
@@ -1248,12 +1239,12 @@ Filter the vector using the boolean function, removing all Ts for which p(
public inline fun filter<T: store + drop>(
-    v: SmartVector<T>,
+    self: SmartVector<T>,
     p: |&T|bool
 ): SmartVector<T> {
     let result = aptos_std::smart_vector::new<T>();
-    aptos_std::smart_vector::for_each(v, |elem| {
-        if (p(&elem)) aptos_std::smart_vector::push_back(&mut result, elem);
+    self.for_each(|elem| {
+        if (p(&elem)) result.push_back(elem);
     });
     result
 }
@@ -1269,7 +1260,7 @@ Filter the vector using the boolean function, removing all Ts for which p(
 
 
 
-
public fun zip<T1: store, T2: store>(v1: smart_vector::SmartVector<T1>, v2: smart_vector::SmartVector<T2>, f: |(T1, T2)|)
+
public fun zip<T1: store, T2: store>(self: smart_vector::SmartVector<T1>, v2: smart_vector::SmartVector<T2>, f: |(T1, T2)|)
 
@@ -1278,11 +1269,11 @@ Filter the vector using the boolean function, removing all Ts for which p( Implementation -
public inline fun zip<T1: store, T2: store>(v1: SmartVector<T1>, v2: SmartVector<T2>, f: |T1, T2|) {
+
public inline fun zip<T1: store, T2: store>(self: SmartVector<T1>, v2: SmartVector<T2>, f: |T1, T2|) {
     // We need to reverse the vectors to consume it efficiently
-    aptos_std::smart_vector::reverse(&mut v1);
-    aptos_std::smart_vector::reverse(&mut v2);
-    aptos_std::smart_vector::zip_reverse(v1, v2, |e1, e2| f(e1, e2));
+    self.reverse();
+    v2.reverse();
+    self.zip_reverse(v2, |e1, e2| f(e1, e2));
 }
 
@@ -1298,7 +1289,7 @@ Apply the function to each pair of elements in the two given vectors in the reve This errors out if the vectors are not of the same length. -
public fun zip_reverse<T1, T2>(v1: smart_vector::SmartVector<T1>, v2: smart_vector::SmartVector<T2>, f: |(T1, T2)|)
+
public fun zip_reverse<T1, T2>(self: smart_vector::SmartVector<T1>, v2: smart_vector::SmartVector<T2>, f: |(T1, T2)|)
 
@@ -1308,20 +1299,20 @@ This errors out if the vectors are not of the same length.
public inline fun zip_reverse<T1, T2>(
-    v1: SmartVector<T1>,
+    self: SmartVector<T1>,
     v2: SmartVector<T2>,
     f: |T1, T2|,
 ) {
-    let len = aptos_std::smart_vector::length(&v1);
+    let len = self.length();
     // We can't use the constant ESMART_VECTORS_LENGTH_MISMATCH here as all calling code would then need to define it
     // due to how inline functions work.
-    assert!(len == aptos_std::smart_vector::length(&v2), 0x20005);
+    assert!(len == v2.length(), 0x20005);
     while (len > 0) {
-        f(aptos_std::smart_vector::pop_back(&mut v1), aptos_std::smart_vector::pop_back(&mut v2));
-        len = len - 1;
+        f(self.pop_back(), v2.pop_back());
+        len -= 1;
     };
-    aptos_std::smart_vector::destroy_empty(v1);
-    aptos_std::smart_vector::destroy_empty(v2);
+    self.destroy_empty();
+    v2.destroy_empty();
 }
 
@@ -1337,7 +1328,7 @@ Apply the function to the references of each pair of elements in the two given v This errors out if the vectors are not of the same length. -
public fun zip_ref<T1, T2>(v1: &smart_vector::SmartVector<T1>, v2: &smart_vector::SmartVector<T2>, f: |(&T1, &T2)|)
+
public fun zip_ref<T1, T2>(self: &smart_vector::SmartVector<T1>, v2: &smart_vector::SmartVector<T2>, f: |(&T1, &T2)|)
 
@@ -1347,18 +1338,16 @@ This errors out if the vectors are not of the same length.
public inline fun zip_ref<T1, T2>(
-    v1: &SmartVector<T1>,
+    self: &SmartVector<T1>,
     v2: &SmartVector<T2>,
     f: |&T1, &T2|,
 ) {
-    let len = aptos_std::smart_vector::length(v1);
+    let len = self.length();
     // We can't use the constant ESMART_VECTORS_LENGTH_MISMATCH here as all calling code would then need to define it
     // due to how inline functions work.
-    assert!(len == aptos_std::smart_vector::length(v2), 0x20005);
-    let i = 0;
-    while (i < len) {
-        f(aptos_std::smart_vector::borrow(v1, i), aptos_std::smart_vector::borrow(v2, i));
-        i = i + 1
+    assert!(len == v2.length(), 0x20005);
+    for (i in 0..len) {
+        f(self.borrow(i), v2.borrow(i));
     }
 }
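
Sketch of `zip_ref` (same assumptions as the earlier SmartVector sketches); the closure sees references to elements paired by index, and the call aborts with 0x20005 on a length mismatch:

#[test]
fun zip_ref_example() {
    let a = smart_vector::new<u64>();
    let b = smart_vector::new<u64>();
    a.add_all(vector[1, 2, 3]);
    b.add_all(vector[10, 20, 30]);
    let total = 0;
    a.zip_ref(&b, |x, y| total = total + *x + *y);
    assert!(total == 66, 0);
    a.destroy();
    b.destroy();
}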
 
@@ -1375,7 +1364,7 @@ Apply the function to mutable references to each pair of elements in the two giv This errors out if the vectors are not of the same length. -
public fun zip_mut<T1, T2>(v1: &mut smart_vector::SmartVector<T1>, v2: &mut smart_vector::SmartVector<T2>, f: |(&mut T1, &mut T2)|)
+
public fun zip_mut<T1, T2>(self: &mut smart_vector::SmartVector<T1>, v2: &mut smart_vector::SmartVector<T2>, f: |(&mut T1, &mut T2)|)
 
@@ -1385,18 +1374,16 @@ This errors out if the vectors are not of the same length.
public inline fun zip_mut<T1, T2>(
-    v1: &mut SmartVector<T1>,
+    self: &mut SmartVector<T1>,
     v2: &mut SmartVector<T2>,
     f: |&mut T1, &mut T2|,
 ) {
-    let i = 0;
-    let len = aptos_std::smart_vector::length(v1);
+    let len = self.length();
     // We can't use the constant ESMART_VECTORS_LENGTH_MISMATCH here as all calling code would then need to define it
     // due to how inline functions work.
-    assert!(len == aptos_std::smart_vector::length(v2), 0x20005);
-    while (i < len) {
-        f(aptos_std::smart_vector::borrow_mut(v1, i), aptos_std::smart_vector::borrow_mut(v2, i));
-        i = i + 1
+    assert!(len == v2.length(), 0x20005);
+    for (i in 0..len) {
+        f(self.borrow_mut(i), v2.borrow_mut(i));
     }
 }
 
@@ -1412,7 +1399,7 @@ This errors out if the vectors are not of the same length. Map the function over the element pairs of the two vectors, producing a new vector. -
public fun zip_map<T1: store, T2: store, NewT: store>(v1: smart_vector::SmartVector<T1>, v2: smart_vector::SmartVector<T2>, f: |(T1, T2)|NewT): smart_vector::SmartVector<NewT>
+
public fun zip_map<T1: store, T2: store, NewT: store>(self: smart_vector::SmartVector<T1>, v2: smart_vector::SmartVector<T2>, f: |(T1, T2)|NewT): smart_vector::SmartVector<NewT>
 
@@ -1422,16 +1409,16 @@ Map the function over the element pairs of the two vectors, producing a new vect
public inline fun zip_map<T1: store, T2: store, NewT: store>(
-    v1: SmartVector<T1>,
+    self: SmartVector<T1>,
     v2: SmartVector<T2>,
     f: |T1, T2|NewT
 ): SmartVector<NewT> {
     // We can't use the constant ESMART_VECTORS_LENGTH_MISMATCH here as all calling code would then need to define it
     // due to how inline functions work.
-    assert!(aptos_std::smart_vector::length(&v1) == aptos_std::smart_vector::length(&v2), 0x20005);
+    assert!(self.length() == v2.length(), 0x20005);
 
     let result = aptos_std::smart_vector::new<NewT>();
-    aptos_std::smart_vector::zip(v1, v2, |e1, e2| push_back(&mut result, f(e1, e2)));
+    self.zip(v2, |e1, e2| result.push_back(f(e1, e2)));
     result
 }
 
@@ -1448,7 +1435,7 @@ Map the function over the references of the element pairs of two vectors, produc values without modifying the original vectors. -
public fun zip_map_ref<T1, T2, NewT: store>(v1: &smart_vector::SmartVector<T1>, v2: &smart_vector::SmartVector<T2>, f: |(&T1, &T2)|NewT): smart_vector::SmartVector<NewT>
+
public fun zip_map_ref<T1, T2, NewT: store>(self: &smart_vector::SmartVector<T1>, v2: &smart_vector::SmartVector<T2>, f: |(&T1, &T2)|NewT): smart_vector::SmartVector<NewT>
 
@@ -1458,16 +1445,16 @@ values without modifying the original vectors.
public inline fun zip_map_ref<T1, T2, NewT: store>(
-    v1: &SmartVector<T1>,
+    self: &SmartVector<T1>,
     v2: &SmartVector<T2>,
     f: |&T1, &T2|NewT
 ): SmartVector<NewT> {
     // We can't use the constant ESMART_VECTORS_LENGTH_MISMATCH here as all calling code would then need to define it
     // due to how inline functions work.
-    assert!(aptos_std::smart_vector::length(v1) == aptos_std::smart_vector::length(v2), 0x20005);
+    assert!(self.length() == v2.length(), 0x20005);
 
     let result = aptos_std::smart_vector::new<NewT>();
-    aptos_std::smart_vector::zip_ref(v1, v2, |e1, e2| push_back(&mut result, f(e1, e2)));
+    self.zip_ref(v2, |e1, e2| result.push_back(f(e1, e2)));
     result
 }
 
@@ -1520,12 +1507,12 @@ values without modifying the original vectors.
-invariant option::is_none(bucket_size)
-    || (option::is_some(bucket_size) && option::borrow(bucket_size) != 0);
-invariant option::is_none(inline_capacity)
-    || (len(inline_vec) <= option::borrow(inline_capacity));
-invariant (option::is_none(inline_capacity) && option::is_none(bucket_size))
-    || (option::is_some(inline_capacity) && option::is_some(bucket_size));
+invariant bucket_size.is_none()
+    || (bucket_size.is_some() && bucket_size.borrow() != 0);
+invariant inline_capacity.is_none()
+    || (len(inline_vec) <= inline_capacity.borrow());
+invariant (inline_capacity.is_none() && bucket_size.is_none())
+    || (inline_capacity.is_some() && bucket_size.is_some());
 
@@ -1563,20 +1550,36 @@ values without modifying the original vectors. + + +### Function `singleton` + + +
public fun singleton<T: store>(element: T): smart_vector::SmartVector<T>
+
+ + + + +
pragma verify = false;
+
+ + + ### Function `destroy_empty` -
public fun destroy_empty<T>(v: smart_vector::SmartVector<T>)
+
public fun destroy_empty<T>(self: smart_vector::SmartVector<T>)
 
-
aborts_if !(is_empty(v));
-aborts_if len(v.inline_vec) != 0
-    || option::is_some(v.big_vec);
+
aborts_if !(self.is_empty());
+aborts_if len(self.inline_vec) != 0
+    || self.big_vec.is_some();
 
@@ -1586,15 +1589,15 @@ values without modifying the original vectors. ### Function `borrow` -
public fun borrow<T>(v: &smart_vector::SmartVector<T>, i: u64): &T
+
public fun borrow<T>(self: &smart_vector::SmartVector<T>, i: u64): &T
 
-
aborts_if i >= length(v);
-aborts_if option::is_some(v.big_vec) && (
-    (len(v.inline_vec) + big_vector::length<T>(option::borrow(v.big_vec))) > MAX_U64
+
aborts_if i >= self.length();
+aborts_if self.big_vec.is_some() && (
+    (len(self.inline_vec) + self.big_vec.borrow().length::<T>()) > MAX_U64
 );
 
@@ -1605,7 +1608,7 @@ values without modifying the original vectors. ### Function `append` -
public fun append<T: store>(lhs: &mut smart_vector::SmartVector<T>, other: smart_vector::SmartVector<T>)
+
public fun append<T: store>(self: &mut smart_vector::SmartVector<T>, other: smart_vector::SmartVector<T>)
 
@@ -1621,7 +1624,7 @@ values without modifying the original vectors. ### Function `push_back` -
public fun push_back<T: store>(v: &mut smart_vector::SmartVector<T>, val: T)
+
public fun push_back<T: store>(self: &mut smart_vector::SmartVector<T>, val: T)
 
@@ -1637,21 +1640,21 @@ values without modifying the original vectors. ### Function `pop_back` -
public fun pop_back<T>(v: &mut smart_vector::SmartVector<T>): T
+
public fun pop_back<T>(self: &mut smart_vector::SmartVector<T>): T
 
pragma verify_duration_estimate = 120;
-aborts_if  option::is_some(v.big_vec)
+aborts_if  self.big_vec.is_some()
     &&
-    (table_with_length::spec_len(option::borrow(v.big_vec).buckets) == 0);
-aborts_if is_empty(v);
-aborts_if option::is_some(v.big_vec) && (
-    (len(v.inline_vec) + big_vector::length<T>(option::borrow(v.big_vec))) > MAX_U64
+    (table_with_length::spec_len(self.big_vec.borrow().buckets) == 0);
+aborts_if self.is_empty();
+aborts_if self.big_vec.is_some() && (
+    (len(self.inline_vec) + self.big_vec.borrow().length::<T>()) > MAX_U64
 );
-ensures length(v) == length(old(v)) - 1;
+ensures self.length() == old(self).length() - 1;
 
@@ -1661,7 +1664,7 @@ values without modifying the original vectors. ### Function `remove` -
public fun remove<T>(v: &mut smart_vector::SmartVector<T>, i: u64): T
+
public fun remove<T>(self: &mut smart_vector::SmartVector<T>, i: u64): T
 
@@ -1677,18 +1680,18 @@ values without modifying the original vectors. ### Function `swap_remove` -
public fun swap_remove<T>(v: &mut smart_vector::SmartVector<T>, i: u64): T
+
public fun swap_remove<T>(self: &mut smart_vector::SmartVector<T>, i: u64): T
 
pragma verify = false;
-aborts_if i >= length(v);
-aborts_if option::is_some(v.big_vec) && (
-    (len(v.inline_vec) + big_vector::length<T>(option::borrow(v.big_vec))) > MAX_U64
+aborts_if i >= self.length();
+aborts_if self.big_vec.is_some() && (
+    (len(self.inline_vec) + self.big_vec.borrow().length::<T>()) > MAX_U64
 );
-ensures length(v) == length(old(v)) - 1;
+ensures self.length() == old(self).length() - 1;
 
@@ -1698,7 +1701,7 @@ values without modifying the original vectors. ### Function `swap` -
public fun swap<T: store>(v: &mut smart_vector::SmartVector<T>, i: u64, j: u64)
+
public fun swap<T: store>(self: &mut smart_vector::SmartVector<T>, i: u64, j: u64)
 
@@ -1714,13 +1717,14 @@ values without modifying the original vectors. ### Function `length` -
public fun length<T>(v: &smart_vector::SmartVector<T>): u64
+
public fun length<T>(self: &smart_vector::SmartVector<T>): u64
 
-
aborts_if option::is_some(v.big_vec) && len(v.inline_vec) + big_vector::length(option::spec_borrow(v.big_vec)) > MAX_U64;
+
aborts_if self.big_vec.is_some() && len(self.inline_vec) + option::spec_borrow(
+    self.big_vec).length() > MAX_U64;
 
diff --git a/aptos-move/framework/aptos-stdlib/doc/storage_slots_allocator.md b/aptos-move/framework/aptos-stdlib/doc/storage_slots_allocator.md new file mode 100644 index 0000000000000..5337888530b74 --- /dev/null +++ b/aptos-move/framework/aptos-stdlib/doc/storage_slots_allocator.md @@ -0,0 +1,876 @@ + + + +# Module `0x1::storage_slots_allocator` + +Abstraction to having "addressable" storage slots (i.e. items) in global storage. +Addresses are local u64 values (unique within a single StorageSlotsAllocator instance, +but can and do overlap across instances). + +Allows optionally to initialize slots (and pay for them upfront), and then reuse them, +providing predictable storage costs. + +If we need to mutate multiple slots at the same time, we can workaround borrow_mut preventing us from that, +via provided pair of remove_and_reserve and fill_reserved_slot methods, to do so in non-conflicting manner. + +Similarly allows getting an address upfront via reserve_slot, for a slot created +later (i.e. if we need address to initialize the value itself). + +In the future, more sophisticated strategies can be added, without breaking/modifying callers, +for example: +* inlining some nodes +* having a fee-payer for any storage creation operations + + +- [Enum `Link`](#0x1_storage_slots_allocator_Link) +- [Enum `StorageSlotsAllocator`](#0x1_storage_slots_allocator_StorageSlotsAllocator) +- [Struct `ReservedSlot`](#0x1_storage_slots_allocator_ReservedSlot) +- [Struct `StoredSlot`](#0x1_storage_slots_allocator_StoredSlot) +- [Constants](#@Constants_0) +- [Function `new`](#0x1_storage_slots_allocator_new) +- [Function `allocate_spare_slots`](#0x1_storage_slots_allocator_allocate_spare_slots) +- [Function `get_num_spare_slot_count`](#0x1_storage_slots_allocator_get_num_spare_slot_count) +- [Function `add`](#0x1_storage_slots_allocator_add) +- [Function `remove`](#0x1_storage_slots_allocator_remove) +- [Function `destroy_empty`](#0x1_storage_slots_allocator_destroy_empty) +- [Function `borrow`](#0x1_storage_slots_allocator_borrow) +- [Function `borrow_mut`](#0x1_storage_slots_allocator_borrow_mut) +- [Function `reserve_slot`](#0x1_storage_slots_allocator_reserve_slot) +- [Function `fill_reserved_slot`](#0x1_storage_slots_allocator_fill_reserved_slot) +- [Function `remove_and_reserve`](#0x1_storage_slots_allocator_remove_and_reserve) +- [Function `free_reserved_slot`](#0x1_storage_slots_allocator_free_reserved_slot) +- [Function `reserved_to_index`](#0x1_storage_slots_allocator_reserved_to_index) +- [Function `stored_to_index`](#0x1_storage_slots_allocator_stored_to_index) +- [Function `is_null_index`](#0x1_storage_slots_allocator_is_null_index) +- [Function `is_special_unused_index`](#0x1_storage_slots_allocator_is_special_unused_index) +- [Function `maybe_pop_from_reuse_queue`](#0x1_storage_slots_allocator_maybe_pop_from_reuse_queue) +- [Function `maybe_push_to_reuse_queue`](#0x1_storage_slots_allocator_maybe_push_to_reuse_queue) +- [Function `next_slot_index`](#0x1_storage_slots_allocator_next_slot_index) +- [Function `add_link`](#0x1_storage_slots_allocator_add_link) +- [Function `remove_link`](#0x1_storage_slots_allocator_remove_link) +- [Specification](#@Specification_1) + + +
use 0x1::error;
+use 0x1::option;
+use 0x1::table_with_length;
+
+ + + + + +## Enum `Link` + +Data stored in an individual slot + + +
enum Link<T: store> has store
+
+ + + +
+Variants + + +
+Occupied + + +
+Fields + + +
+
+value: T +
+
+ +
+
+ + +
+ +
+ +
+Vacant + + +
+Fields + + +
+
+next: u64 +
+
+ +
+
+ + +
+ +
+ +
+ + + +## Enum `StorageSlotsAllocator` + + + +
enum StorageSlotsAllocator<T: store> has store
+
+ + + +
+Variants + + +
+V1 + + +
+Fields + + +
+
+slots: option::Option<table_with_length::TableWithLength<u64, storage_slots_allocator::Link<T>>> +
+
+ +
+
+new_slot_index: u64 +
+
+ +
+
+should_reuse: bool +
+
+ +
+
+reuse_head_index: u64 +
+
+ +
+
+reuse_spare_count: u32 +
+
+ +
+
+ + +
+ +
+ +
+ + + +## Struct `ReservedSlot` + +Handle to a reserved slot within a transaction. +Not copy/drop/store-able, to guarantee reservation +is used or released within the transaction. + + +
struct ReservedSlot
+
+ + + +
+Fields + + +
+
+slot_index: u64 +
+
+ +
+
+ + +
+ + + +## Struct `StoredSlot` + +Ownership handle to a slot. +Not copy/drop-able to make sure slots are released when not needed, +and there is unique owner for each slot. + + +
struct StoredSlot has store
+
+ + + +
+Fields + + +
+
+slot_index: u64 +
+
+ +
+
+ + +
+ + + +## Constants + + + + + + +
const ECANNOT_HAVE_SPARES_WITHOUT_REUSE: u64 = 2;
+
+ + + + + + + +
const EINTERNAL_INVARIANT_BROKEN: u64 = 7;
+
+ + + + + + + +
const EINVALID_ARGUMENT: u64 = 1;
+
+ + + + + + + +
const FIRST_INDEX: u64 = 10;
+
+ + + + + + + +
const NULL_INDEX: u64 = 0;
+
+ + + + + +## Function `new` + + + +
public fun new<T: store>(should_reuse: bool): storage_slots_allocator::StorageSlotsAllocator<T>
+
+ + + +
+Implementation + + +
public fun new<T: store>(should_reuse: bool): StorageSlotsAllocator<T> {
+    StorageSlotsAllocator::V1 {
+        slots: option::none(),
+        new_slot_index: FIRST_INDEX,
+        should_reuse,
+        reuse_head_index: NULL_INDEX,
+        reuse_spare_count: 0,
+    }
+}
+
+ + + +
+ + + +## Function `allocate_spare_slots` + + + +
public fun allocate_spare_slots<T: store>(self: &mut storage_slots_allocator::StorageSlotsAllocator<T>, num_to_allocate: u64)
+
+ + + +
+Implementation + + +
public fun allocate_spare_slots<T: store>(self: &mut StorageSlotsAllocator<T>, num_to_allocate: u64) {
+    assert!(self.should_reuse, error::invalid_argument(ECANNOT_HAVE_SPARES_WITHOUT_REUSE));
+    for (i in 0..num_to_allocate) {
+        let slot_index = self.next_slot_index();
+        self.maybe_push_to_reuse_queue(slot_index);
+    };
+}
+
+ + + +
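A hypothetical sketch (not part of this patch) of the spare-slot mechanism described in the module overview, pre-paying storage so later `add` calls reuse existing slots; it assumes an allocator over `u64` values:

```move
let allocator = aptos_std::storage_slots_allocator::new<u64>(/* should_reuse */ true);
allocator.allocate_spare_slots(3);
assert!(allocator.get_num_spare_slot_count() == 3, 0);
// add() pops a spare slot from the reuse queue instead of creating a new one.
let slot = allocator.add(42);
assert!(allocator.get_num_spare_slot_count() == 2, 1);
// The allocator and the StoredSlot only have `store`, so in real code both
// would live inside a resource; they are left unconsumed in this sketch.
```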
+ + + +## Function `get_num_spare_slot_count` + + + +
public fun get_num_spare_slot_count<T: store>(self: &storage_slots_allocator::StorageSlotsAllocator<T>): u32
+
+ + + +
+Implementation + + +
public fun get_num_spare_slot_count<T: store>(self: &StorageSlotsAllocator<T>): u32 {
+    assert!(self.should_reuse, error::invalid_argument(ECANNOT_HAVE_SPARES_WITHOUT_REUSE));
+    self.reuse_spare_count
+}
+
+ + + +
+ + + +## Function `add` + + + +
public fun add<T: store>(self: &mut storage_slots_allocator::StorageSlotsAllocator<T>, val: T): storage_slots_allocator::StoredSlot
+
+ + + +
+Implementation + + +
public fun add<T: store>(self: &mut StorageSlotsAllocator<T>, val: T): StoredSlot {
+    let (stored_slot, reserved_slot) = self.reserve_slot();
+    self.fill_reserved_slot(reserved_slot, val);
+    stored_slot
+}
+
+ + + +
+ + + +## Function `remove` + + + +
public fun remove<T: store>(self: &mut storage_slots_allocator::StorageSlotsAllocator<T>, slot: storage_slots_allocator::StoredSlot): T
+
+ + + +
+Implementation + + +
public fun remove<T: store>(self: &mut StorageSlotsAllocator<T>, slot: StoredSlot): T {
+    let (reserved_slot, value) = self.remove_and_reserve(slot.stored_to_index());
+    self.free_reserved_slot(reserved_slot, slot);
+    value
+}
+
+ + + +
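A hypothetical end-to-end sketch (not part of this patch) of the basic lifecycle, using only the functions documented here:

```move
let allocator = aptos_std::storage_slots_allocator::new<u64>(false);
let slot = allocator.add(7);
let idx = slot.stored_to_index();
assert!(*allocator.borrow(idx) == 7, 0);
*allocator.borrow_mut(idx) = 8;
// remove() consumes the StoredSlot ownership handle and returns the value.
let value = allocator.remove(slot);
assert!(value == 8, 1);
// With every slot freed, the allocator itself can be destroyed.
allocator.destroy_empty();
```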
+ + + +## Function `destroy_empty` + + + +
public fun destroy_empty<T: store>(self: storage_slots_allocator::StorageSlotsAllocator<T>)
+
+ + + +
+Implementation + + +
public fun destroy_empty<T: store>(self: StorageSlotsAllocator<T>) {
+    loop {
+        let reuse_index = self.maybe_pop_from_reuse_queue();
+        if (reuse_index == NULL_INDEX) {
+            break;
+        };
+    };
+    match (self) {
+        V1 {
+            slots,
+            new_slot_index: _,
+            should_reuse: _,
+            reuse_head_index,
+            reuse_spare_count: _,
+        } => {
+            assert!(reuse_head_index == NULL_INDEX, EINTERNAL_INVARIANT_BROKEN);
+            if (slots.is_some()) {
+                slots.destroy_some().destroy_empty();
+            } else {
+                slots.destroy_none();
+            }
+        },
+    };
+}
+
+ + + +
+ + + +## Function `borrow` + + + +
public fun borrow<T: store>(self: &storage_slots_allocator::StorageSlotsAllocator<T>, slot_index: u64): &T
+
+ + + +
+Implementation + + +
public fun borrow<T: store>(self: &StorageSlotsAllocator<T>, slot_index: u64): &T {
+    &self.slots.borrow().borrow(slot_index).value
+}
+
+ + + +
+ + + +## Function `borrow_mut` + + + +
public fun borrow_mut<T: store>(self: &mut storage_slots_allocator::StorageSlotsAllocator<T>, slot_index: u64): &mut T
+
+ + + +
+Implementation + + +
public fun borrow_mut<T: store>(self: &mut StorageSlotsAllocator<T>, slot_index: u64): &mut T {
+    &mut self.slots.borrow_mut().borrow_mut(slot_index).value
+}
+
+ + + +
+ + + +## Function `reserve_slot` + + + +
public fun reserve_slot<T: store>(self: &mut storage_slots_allocator::StorageSlotsAllocator<T>): (storage_slots_allocator::StoredSlot, storage_slots_allocator::ReservedSlot)
+
+ + + +
+Implementation + + +
public fun reserve_slot<T: store>(self: &mut StorageSlotsAllocator<T>): (StoredSlot, ReservedSlot) {
+    let slot_index = self.maybe_pop_from_reuse_queue();
+    if (slot_index == NULL_INDEX) {
+        slot_index = self.next_slot_index();
+    };
+
+    (
+        StoredSlot { slot_index },
+        ReservedSlot { slot_index },
+    )
+}
+
+ + + +
+ + + +## Function `fill_reserved_slot` + + + +
public fun fill_reserved_slot<T: store>(self: &mut storage_slots_allocator::StorageSlotsAllocator<T>, slot: storage_slots_allocator::ReservedSlot, val: T)
+
+ + + +
+Implementation + + +
public fun fill_reserved_slot<T: store>(self: &mut StorageSlotsAllocator<T>, slot: ReservedSlot, val: T) {
+    let ReservedSlot { slot_index } = slot;
+    self.add_link(slot_index, Link::Occupied { value: val });
+}
+
+ + + +
+ + + +## Function `remove_and_reserve` + +Remove storage slot, but reserve it for later. + + +
public fun remove_and_reserve<T: store>(self: &mut storage_slots_allocator::StorageSlotsAllocator<T>, slot_index: u64): (storage_slots_allocator::ReservedSlot, T)
+
+ + + +
+Implementation + + +
public fun remove_and_reserve<T: store>(self: &mut StorageSlotsAllocator<T>, slot_index: u64): (ReservedSlot, T) {
+    let Link::Occupied { value } = self.remove_link(slot_index);
+    (ReservedSlot { slot_index }, value)
+}
+
+ + + +
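The module overview motivates `remove_and_reserve` / `fill_reserved_slot` as the workaround for not being able to `borrow_mut` two slots at once. A hypothetical sketch (not part of this patch), continuing with an `allocator: StorageSlotsAllocator<u64>` and two previously added slots `a` and `b`:

```move
// Temporarily take both values out; their indices stay reserved the whole time.
let (res_a, val_a) = allocator.remove_and_reserve(a.stored_to_index());
let (res_b, val_b) = allocator.remove_and_reserve(b.stored_to_index());
// Both values are owned locally here, so they can be updated together.
allocator.fill_reserved_slot(res_a, val_a + val_b);
allocator.fill_reserved_slot(res_b, val_b);
// `a` and `b` remain the ownership handles for the two (still occupied) slots.
```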
+ + + +## Function `free_reserved_slot` + + + +
public fun free_reserved_slot<T: store>(self: &mut storage_slots_allocator::StorageSlotsAllocator<T>, reserved_slot: storage_slots_allocator::ReservedSlot, stored_slot: storage_slots_allocator::StoredSlot)
+
+ + + +
+Implementation + + +
public fun free_reserved_slot<T: store>(self: &mut StorageSlotsAllocator<T>, reserved_slot: ReservedSlot, stored_slot: StoredSlot) {
+    let ReservedSlot { slot_index } = reserved_slot;
+    assert!(slot_index == stored_slot.slot_index, EINVALID_ARGUMENT);
+    let StoredSlot { slot_index: _ } = stored_slot;
+    self.maybe_push_to_reuse_queue(slot_index);
+}
+
+ + + +
+ + + +## Function `reserved_to_index` + + + +
public fun reserved_to_index(self: &storage_slots_allocator::ReservedSlot): u64
+
+ + + +
+Implementation + + +
public fun reserved_to_index(self: &ReservedSlot): u64 {
+    self.slot_index
+}
+
+ + + +
+ + + +## Function `stored_to_index` + + + +
public fun stored_to_index(self: &storage_slots_allocator::StoredSlot): u64
+
+ + + +
+Implementation + + +
public fun stored_to_index(self: &StoredSlot): u64 {
+    self.slot_index
+}
+
+ + + +
+ + + +## Function `is_null_index` + + + +
public fun is_null_index(slot_index: u64): bool
+
+ + + +
+Implementation + + +
public fun is_null_index(slot_index: u64): bool {
+    slot_index == NULL_INDEX
+}
+
+ + + +
+ + + +## Function `is_special_unused_index` + + + +
public fun is_special_unused_index(slot_index: u64): bool
+
+ + + +
+Implementation + + +
public fun is_special_unused_index(slot_index: u64): bool {
+    slot_index != NULL_INDEX && slot_index < FIRST_INDEX
+}
+
+ + + +
+ + + +## Function `maybe_pop_from_reuse_queue` + + + +
fun maybe_pop_from_reuse_queue<T: store>(self: &mut storage_slots_allocator::StorageSlotsAllocator<T>): u64
+
+ + + +
+Implementation + + +
fun maybe_pop_from_reuse_queue<T: store>(self: &mut StorageSlotsAllocator<T>): u64 {
+    let slot_index = self.reuse_head_index;
+    if (slot_index != NULL_INDEX) {
+        let Link::Vacant { next } = self.remove_link(slot_index);
+        self.reuse_head_index = next;
+        self.reuse_spare_count -= 1;
+    };
+    slot_index
+}
+
+ + + +
+ + + +## Function `maybe_push_to_reuse_queue` + + + +
fun maybe_push_to_reuse_queue<T: store>(self: &mut storage_slots_allocator::StorageSlotsAllocator<T>, slot_index: u64)
+
+ + + +
+Implementation + + +
fun maybe_push_to_reuse_queue<T: store>(self: &mut StorageSlotsAllocator<T>, slot_index: u64) {
+    if (self.should_reuse) {
+        let link = Link::Vacant { next: self.reuse_head_index };
+        self.add_link(slot_index, link);
+        self.reuse_head_index = slot_index;
+        self.reuse_spare_count += 1;
+    };
+}
+
+ + + +
+ + + +## Function `next_slot_index` + + + +
fun next_slot_index<T: store>(self: &mut storage_slots_allocator::StorageSlotsAllocator<T>): u64
+
+ + + +
+Implementation + + +
fun next_slot_index<T: store>(self: &mut StorageSlotsAllocator<T>): u64 {
+    let slot_index = self.new_slot_index;
+    self.new_slot_index += 1;
+    if (self.slots.is_none()) {
+        self.slots.fill(table_with_length::new<u64, Link<T>>());
+    };
+    slot_index
+}
+
+ + + +
+ + + +## Function `add_link` + + + +
fun add_link<T: store>(self: &mut storage_slots_allocator::StorageSlotsAllocator<T>, slot_index: u64, link: storage_slots_allocator::Link<T>)
+
+ + + +
+Implementation + + +
fun add_link<T: store>(self: &mut StorageSlotsAllocator<T>, slot_index: u64, link: Link<T>) {
+    self.slots.borrow_mut().add(slot_index, link);
+}
+
+ + + +
+ + + +## Function `remove_link` + + + +
fun remove_link<T: store>(self: &mut storage_slots_allocator::StorageSlotsAllocator<T>, slot_index: u64): storage_slots_allocator::Link<T>
+
+ + + +
+Implementation + + +
fun remove_link<T: store>(self: &mut StorageSlotsAllocator<T>, slot_index: u64): Link<T> {
+    self.slots.borrow_mut().remove(slot_index)
+}
+
+ + + +
+ + + +## Specification + + + +
pragma verify = false;
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/aptos-stdlib/doc/table.md b/aptos-move/framework/aptos-stdlib/doc/table.md index d05ea82adb590..27daa6af22491 100644 --- a/aptos-move/framework/aptos-stdlib/doc/table.md +++ b/aptos-move/framework/aptos-stdlib/doc/table.md @@ -22,7 +22,7 @@ struct itself, while the operations are implemented as native functions. No trav - [Function `upsert`](#0x1_table_upsert) - [Function `remove`](#0x1_table_remove) - [Function `contains`](#0x1_table_contains) -- [Function `destroy`](#0x1_table_destroy) +- [Function `destroy_known_empty_unsafe`](#0x1_table_destroy_known_empty_unsafe) - [Function `new_table_handle`](#0x1_table_new_table_handle) - [Function `add_box`](#0x1_table_add_box) - [Function `borrow_box`](#0x1_table_borrow_box) @@ -36,12 +36,13 @@ struct itself, while the operations are implemented as native functions. No trav - [Function `new`](#@Specification_0_new) - [Function `add`](#@Specification_0_add) - [Function `borrow`](#@Specification_0_borrow) + - [Function `borrow_with_default`](#@Specification_0_borrow_with_default) - [Function `borrow_mut`](#@Specification_0_borrow_mut) - [Function `borrow_mut_with_default`](#@Specification_0_borrow_mut_with_default) - [Function `upsert`](#@Specification_0_upsert) - [Function `remove`](#@Specification_0_remove) - [Function `contains`](#@Specification_0_contains) - - [Function `destroy`](#@Specification_0_destroy) + - [Function `destroy_known_empty_unsafe`](#@Specification_0_destroy_known_empty_unsafe)
@@ -140,7 +141,7 @@ key already exists. The entry itself is not stored in the table, and cannot be discovered from it. -
public fun add<K: copy, drop, V>(table: &mut table::Table<K, V>, key: K, val: V)
+
public fun add<K: copy, drop, V>(self: &mut table::Table<K, V>, key: K, val: V)
 
@@ -149,8 +150,8 @@ table, and cannot be discovered from it. Implementation -
public fun add<K: copy + drop, V>(table: &mut Table<K, V>, key: K, val: V) {
-    add_box<K, V, Box<V>>(table, key, Box { val })
+
public fun add<K: copy + drop, V>(self: &mut Table<K, V>, key: K, val: V) {
+    add_box<K, V, Box<V>>(self, key, Box { val })
 }
 
@@ -166,7 +167,7 @@ Acquire an immutable reference to the value which key maps to. Aborts if there is no entry for key. -
public fun borrow<K: copy, drop, V>(table: &table::Table<K, V>, key: K): &V
+
public fun borrow<K: copy, drop, V>(self: &table::Table<K, V>, key: K): &V
 
@@ -175,8 +176,8 @@ Aborts if there is no entry for key. Implementation -
public fun borrow<K: copy + drop, V>(table: &Table<K, V>, key: K): &V {
-    &borrow_box<K, V, Box<V>>(table, key).val
+
public fun borrow<K: copy + drop, V>(self: &Table<K, V>, key: K): &V {
+    &borrow_box<K, V, Box<V>>(self, key).val
 }
 
@@ -192,7 +193,7 @@ Acquire an immutable reference to the value which key maps to. Returns specified default value if there is no entry for key. -
public fun borrow_with_default<K: copy, drop, V>(table: &table::Table<K, V>, key: K, default: &V): &V
+
public fun borrow_with_default<K: copy, drop, V>(self: &table::Table<K, V>, key: K, default: &V): &V
 
@@ -201,11 +202,11 @@ Returns specified default value if there is no entry for key. Implementation -
public fun borrow_with_default<K: copy + drop, V>(table: &Table<K, V>, key: K, default: &V): &V {
-    if (!contains(table, copy key)) {
+
public fun borrow_with_default<K: copy + drop, V>(self: &Table<K, V>, key: K, default: &V): &V {
+    if (!self.contains(copy key)) {
         default
     } else {
-        borrow(table, copy key)
+        self.borrow(copy key)
     }
 }
 
@@ -222,7 +223,7 @@ Acquire a mutable reference to the value which key maps to. Aborts if there is no entry for key. -
public fun borrow_mut<K: copy, drop, V>(table: &mut table::Table<K, V>, key: K): &mut V
+
public fun borrow_mut<K: copy, drop, V>(self: &mut table::Table<K, V>, key: K): &mut V
 
@@ -231,8 +232,8 @@ Aborts if there is no entry for key. Implementation -
public fun borrow_mut<K: copy + drop, V>(table: &mut Table<K, V>, key: K): &mut V {
-    &mut borrow_box_mut<K, V, Box<V>>(table, key).val
+
public fun borrow_mut<K: copy + drop, V>(self: &mut Table<K, V>, key: K): &mut V {
+    &mut borrow_box_mut<K, V, Box<V>>(self, key).val
 }
 
@@ -248,7 +249,7 @@ Acquire a mutable reference to the value which key maps to. Insert the pair (key, default) first if there is no entry for key. -
public fun borrow_mut_with_default<K: copy, drop, V: drop>(table: &mut table::Table<K, V>, key: K, default: V): &mut V
+
public fun borrow_mut_with_default<K: copy, drop, V: drop>(self: &mut table::Table<K, V>, key: K, default: V): &mut V
 
@@ -257,11 +258,11 @@ Insert the pair (key, default) first if there is no en Implementation -
public fun borrow_mut_with_default<K: copy + drop, V: drop>(table: &mut Table<K, V>, key: K, default: V): &mut V {
-    if (!contains(table, copy key)) {
-        add(table, copy key, default)
+
public fun borrow_mut_with_default<K: copy + drop, V: drop>(self: &mut Table<K, V>, key: K, default: V): &mut V {
+    if (!self.contains(copy key)) {
+        self.add(copy key, default)
     };
-    borrow_mut(table, key)
+    self.borrow_mut(key)
 }
 
@@ -277,7 +278,7 @@ Insert the pair (key, value) if there is no entry for update the value of the entry for key to value otherwise -
public fun upsert<K: copy, drop, V: drop>(table: &mut table::Table<K, V>, key: K, value: V)
+
public fun upsert<K: copy, drop, V: drop>(self: &mut table::Table<K, V>, key: K, value: V)
 
@@ -286,11 +287,11 @@ update the value of the entry for key to value otherwi Implementation -
public fun upsert<K: copy + drop, V: drop>(table: &mut Table<K, V>, key: K, value: V) {
-    if (!contains(table, copy key)) {
-        add(table, copy key, value)
+
public fun upsert<K: copy + drop, V: drop>(self: &mut Table<K, V>, key: K, value: V) {
+    if (!self.contains(copy key)) {
+        self.add(copy key, value)
     } else {
-        let ref = borrow_mut(table, key);
+        let ref = self.borrow_mut(key);
         *ref = value;
     };
 }
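A hypothetical sketch (not part of this patch) of the receiver-style `Table` API after this rename, exercising `add`, `upsert`, `borrow_with_default` and `remove`:

```move
let t = aptos_std::table::new<address, u64>();
t.add(@0x1, 10);
t.upsert(@0x1, 11);   // key exists: overwrite
t.upsert(@0x2, 20);   // key missing: insert
let default = 0;
assert!(*t.borrow_with_default(@0x3, &default) == 0, 0);
assert!(t.remove(@0x1) == 11, 1);
// Table has no `drop`, so in real code `t` would be stored inside a resource.
```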
@@ -304,11 +305,11 @@ update the value of the entry for key to value otherwi
 
 ## Function `remove`
 
-Remove from table and return the value which key maps to.
+Remove from self and return the value which key maps to.
 Aborts if there is no entry for key.
 
 
-
public fun remove<K: copy, drop, V>(table: &mut table::Table<K, V>, key: K): V
+
public fun remove<K: copy, drop, V>(self: &mut table::Table<K, V>, key: K): V
 
@@ -317,8 +318,8 @@ Aborts if there is no entry for key. Implementation -
public fun remove<K: copy + drop, V>(table: &mut Table<K, V>, key: K): V {
-    let Box { val } = remove_box<K, V, Box<V>>(table, key);
+
public fun remove<K: copy + drop, V>(self: &mut Table<K, V>, key: K): V {
+    let Box { val } = remove_box<K, V, Box<V>>(self, key);
     val
 }
 
@@ -331,10 +332,10 @@ Aborts if there is no entry for key. ## Function `contains` -Returns true iff table contains an entry for key. +Returns true iff self contains an entry for key. -
public fun contains<K: copy, drop, V>(table: &table::Table<K, V>, key: K): bool
+
public fun contains<K: copy, drop, V>(self: &table::Table<K, V>, key: K): bool
 
@@ -343,8 +344,8 @@ Returns true iff table contains an Implementation -
public fun contains<K: copy + drop, V>(table: &Table<K, V>, key: K): bool {
-    contains_box<K, V, Box<V>>(table, key)
+
public fun contains<K: copy + drop, V>(self: &Table<K, V>, key: K): bool {
+    contains_box<K, V, Box<V>>(self, key)
 }
 
@@ -352,13 +353,15 @@ Returns true iff table contains an - + -## Function `destroy` +## Function `destroy_known_empty_unsafe` +Table cannot know if it is empty or not, so this method is not public, +and can be used only in modules that know by themselves that table is empty. -
public(friend) fun destroy<K: copy, drop, V>(table: table::Table<K, V>)
+
public(friend) fun destroy_known_empty_unsafe<K: copy, drop, V>(self: table::Table<K, V>)
 
@@ -367,9 +370,9 @@ Returns true iff table contains an Implementation -
public(friend) fun destroy<K: copy + drop, V>(table: Table<K, V>) {
-    destroy_empty_box<K, V, Box<V>>(&table);
-    drop_unchecked_box<K, V, Box<V>>(table)
+
friend fun destroy_known_empty_unsafe<K: copy + drop, V>(self: Table<K, V>) {
+    destroy_empty_box<K, V, Box<V>>(&self);
+    drop_unchecked_box<K, V, Box<V>>(self)
 }
 
@@ -581,7 +584,7 @@ Returns true iff table contains an
pragma intrinsic = map,
     map_new = new,
-    map_destroy_empty = destroy,
+    map_destroy_empty = destroy_known_empty_unsafe,
     map_has_key = contains,
     map_add_no_override = add,
     map_add_override_if_exists = upsert,
@@ -589,6 +592,7 @@ Returns true iff table contains an
     map_borrow = borrow,
     map_borrow_mut = borrow_mut,
     map_borrow_mut_with_default = borrow_mut_with_default,
+    map_borrow_with_default = borrow_with_default,
     map_spec_get = spec_get,
     map_spec_set = spec_set,
     map_spec_del = spec_remove,
@@ -618,7 +622,7 @@ Returns true iff table contains an
 ### Function `add`
 
 
-
public fun add<K: copy, drop, V>(table: &mut table::Table<K, V>, key: K, val: V)
+
public fun add<K: copy, drop, V>(self: &mut table::Table<K, V>, key: K, val: V)
 
@@ -634,7 +638,23 @@ Returns true iff table contains an ### Function `borrow` -
public fun borrow<K: copy, drop, V>(table: &table::Table<K, V>, key: K): &V
+
public fun borrow<K: copy, drop, V>(self: &table::Table<K, V>, key: K): &V
+
+ + + + +
pragma intrinsic;
+
+ + + + + +### Function `borrow_with_default` + + +
public fun borrow_with_default<K: copy, drop, V>(self: &table::Table<K, V>, key: K, default: &V): &V
 
@@ -650,7 +670,7 @@ Returns true iff table contains an ### Function `borrow_mut` -
public fun borrow_mut<K: copy, drop, V>(table: &mut table::Table<K, V>, key: K): &mut V
+
public fun borrow_mut<K: copy, drop, V>(self: &mut table::Table<K, V>, key: K): &mut V
 
@@ -666,7 +686,7 @@ Returns true iff table contains an ### Function `borrow_mut_with_default` -
public fun borrow_mut_with_default<K: copy, drop, V: drop>(table: &mut table::Table<K, V>, key: K, default: V): &mut V
+
public fun borrow_mut_with_default<K: copy, drop, V: drop>(self: &mut table::Table<K, V>, key: K, default: V): &mut V
 
@@ -682,7 +702,7 @@ Returns true iff table contains an ### Function `upsert` -
public fun upsert<K: copy, drop, V: drop>(table: &mut table::Table<K, V>, key: K, value: V)
+
public fun upsert<K: copy, drop, V: drop>(self: &mut table::Table<K, V>, key: K, value: V)
 
@@ -698,7 +718,7 @@ Returns true iff table contains an ### Function `remove` -
public fun remove<K: copy, drop, V>(table: &mut table::Table<K, V>, key: K): V
+
public fun remove<K: copy, drop, V>(self: &mut table::Table<K, V>, key: K): V
 
@@ -714,7 +734,7 @@ Returns true iff table contains an ### Function `contains` -
public fun contains<K: copy, drop, V>(table: &table::Table<K, V>, key: K): bool
+
public fun contains<K: copy, drop, V>(self: &table::Table<K, V>, key: K): bool
 
@@ -761,12 +781,12 @@ Returns true iff table contains an - + -### Function `destroy` +### Function `destroy_known_empty_unsafe` -
public(friend) fun destroy<K: copy, drop, V>(table: table::Table<K, V>)
+
public(friend) fun destroy_known_empty_unsafe<K: copy, drop, V>(self: table::Table<K, V>)
 
diff --git a/aptos-move/framework/aptos-stdlib/doc/table_with_length.md b/aptos-move/framework/aptos-stdlib/doc/table_with_length.md index 1701578615e97..aeee7c8b66927 100644 --- a/aptos-move/framework/aptos-stdlib/doc/table_with_length.md +++ b/aptos-move/framework/aptos-stdlib/doc/table_with_length.md @@ -141,7 +141,7 @@ Create a new Table. Destroy a table. The table must be empty to succeed. -
public fun destroy_empty<K: copy, drop, V>(table: table_with_length::TableWithLength<K, V>)
+
public fun destroy_empty<K: copy, drop, V>(self: table_with_length::TableWithLength<K, V>)
 
@@ -150,10 +150,10 @@ Destroy a table. The table must be empty to succeed. Implementation -
public fun destroy_empty<K: copy + drop, V>(table: TableWithLength<K, V>) {
-    assert!(table.length == 0, error::invalid_state(ENOT_EMPTY));
-    let TableWithLength { inner, length: _ } = table;
-    table::destroy(inner)
+
public fun destroy_empty<K: copy + drop, V>(self: TableWithLength<K, V>) {
+    assert!(self.length == 0, error::invalid_state(ENOT_EMPTY));
+    let TableWithLength { inner, length: _ } = self;
+    inner.destroy_known_empty_unsafe()
 }
 
@@ -170,7 +170,7 @@ key already exists. The entry itself is not stored in the table, and cannot be discovered from it. -
public fun add<K: copy, drop, V>(table: &mut table_with_length::TableWithLength<K, V>, key: K, val: V)
+
public fun add<K: copy, drop, V>(self: &mut table_with_length::TableWithLength<K, V>, key: K, val: V)
 
@@ -179,9 +179,9 @@ table, and cannot be discovered from it. Implementation -
public fun add<K: copy + drop, V>(table: &mut TableWithLength<K, V>, key: K, val: V) {
-    table::add(&mut table.inner, key, val);
-    table.length = table.length + 1;
+
public fun add<K: copy + drop, V>(self: &mut TableWithLength<K, V>, key: K, val: V) {
+    self.inner.add(key, val);
+    self.length += 1;
 }
 
@@ -197,7 +197,7 @@ Acquire an immutable reference to the value which key maps to. Aborts if there is no entry for key. -
public fun borrow<K: copy, drop, V>(table: &table_with_length::TableWithLength<K, V>, key: K): &V
+
public fun borrow<K: copy, drop, V>(self: &table_with_length::TableWithLength<K, V>, key: K): &V
 
@@ -206,8 +206,8 @@ Aborts if there is no entry for key. Implementation -
public fun borrow<K: copy + drop, V>(table: &TableWithLength<K, V>, key: K): &V {
-    table::borrow(&table.inner, key)
+
public fun borrow<K: copy + drop, V>(self: &TableWithLength<K, V>, key: K): &V {
+    self.inner.borrow(key)
 }
 
@@ -223,7 +223,7 @@ Acquire a mutable reference to the value which key maps to. Aborts if there is no entry for key. -
public fun borrow_mut<K: copy, drop, V>(table: &mut table_with_length::TableWithLength<K, V>, key: K): &mut V
+
public fun borrow_mut<K: copy, drop, V>(self: &mut table_with_length::TableWithLength<K, V>, key: K): &mut V
 
@@ -232,8 +232,8 @@ Aborts if there is no entry for key. Implementation -
public fun borrow_mut<K: copy + drop, V>(table: &mut TableWithLength<K, V>, key: K): &mut V {
-    table::borrow_mut(&mut table.inner, key)
+
public fun borrow_mut<K: copy + drop, V>(self: &mut TableWithLength<K, V>, key: K): &mut V {
+    self.inner.borrow_mut(key)
 }
 
@@ -248,7 +248,7 @@ Aborts if there is no entry for key. Returns the length of the table, i.e. the number of entries. -
public fun length<K: copy, drop, V>(table: &table_with_length::TableWithLength<K, V>): u64
+
public fun length<K: copy, drop, V>(self: &table_with_length::TableWithLength<K, V>): u64
 
@@ -257,8 +257,8 @@ Returns the length of the table, i.e. the number of entries. Implementation -
public fun length<K: copy + drop, V>(table: &TableWithLength<K, V>): u64 {
-    table.length
+
public fun length<K: copy + drop, V>(self: &TableWithLength<K, V>): u64 {
+    self.length
 }
 
@@ -273,7 +273,7 @@ Returns the length of the table, i.e. the number of entries. Returns true if this table is empty. -
public fun empty<K: copy, drop, V>(table: &table_with_length::TableWithLength<K, V>): bool
+
public fun empty<K: copy, drop, V>(self: &table_with_length::TableWithLength<K, V>): bool
 
@@ -282,8 +282,8 @@ Returns true if this table is empty. Implementation -
public fun empty<K: copy + drop, V>(table: &TableWithLength<K, V>): bool {
-    table.length == 0
+
public fun empty<K: copy + drop, V>(self: &TableWithLength<K, V>): bool {
+    self.length == 0
 }
 
@@ -299,7 +299,7 @@ Acquire a mutable reference to the value which key maps to. Insert the pair (key, default) first if there is no entry for key. -
public fun borrow_mut_with_default<K: copy, drop, V: drop>(table: &mut table_with_length::TableWithLength<K, V>, key: K, default: V): &mut V
+
public fun borrow_mut_with_default<K: copy, drop, V: drop>(self: &mut table_with_length::TableWithLength<K, V>, key: K, default: V): &mut V
 
@@ -308,13 +308,13 @@ Insert the pair (key, default) first if there is no en Implementation -
public fun borrow_mut_with_default<K: copy + drop, V: drop>(table: &mut TableWithLength<K, V>, key: K, default: V): &mut V {
-    if (table::contains(&table.inner, key)) {
-        table::borrow_mut(&mut table.inner, key)
+
public fun borrow_mut_with_default<K: copy + drop, V: drop>(self: &mut TableWithLength<K, V>, key: K, default: V): &mut V {
+    if (self.inner.contains(key)) {
+        self.inner.borrow_mut(key)
     } else {
-        table::add(&mut table.inner, key, default);
-        table.length = table.length + 1;
-        table::borrow_mut(&mut table.inner, key)
+        self.inner.add(key, default);
+        self.length += 1;
+        self.inner.borrow_mut(key)
     }
 }
 
@@ -331,7 +331,7 @@ Insert the pair (key, value) if there is no entry for update the value of the entry for key to value otherwise -
public fun upsert<K: copy, drop, V: drop>(table: &mut table_with_length::TableWithLength<K, V>, key: K, value: V)
+
public fun upsert<K: copy, drop, V: drop>(self: &mut table_with_length::TableWithLength<K, V>, key: K, value: V)
 
@@ -340,11 +340,11 @@ update the value of the entry for key to value otherwi Implementation -
public fun upsert<K: copy + drop, V: drop>(table: &mut TableWithLength<K, V>, key: K, value: V) {
-    if (!table::contains(&table.inner, key)) {
-        add(table, copy key, value)
+
public fun upsert<K: copy + drop, V: drop>(self: &mut TableWithLength<K, V>, key: K, value: V) {
+    if (!self.inner.contains(key)) {
+        self.add(copy key, value)
     } else {
-        let ref = table::borrow_mut(&mut table.inner, key);
+        let ref = self.inner.borrow_mut(key);
         *ref = value;
     };
 }
@@ -362,7 +362,7 @@ Remove from table and return the v
 Aborts if there is no entry for key.
 
 
-
public fun remove<K: copy, drop, V>(table: &mut table_with_length::TableWithLength<K, V>, key: K): V
+
public fun remove<K: copy, drop, V>(self: &mut table_with_length::TableWithLength<K, V>, key: K): V
 
@@ -371,9 +371,9 @@ Aborts if there is no entry for key. Implementation -
public fun remove<K: copy + drop, V>(table: &mut TableWithLength<K, V>, key: K): V {
-    let val = table::remove(&mut table.inner, key);
-    table.length = table.length - 1;
+
public fun remove<K: copy + drop, V>(self: &mut TableWithLength<K, V>, key: K): V {
+    let val = self.inner.remove(key);
+    self.length -= 1;
     val
 }
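A hypothetical sketch (not part of this patch) showing how `TableWithLength` keeps the length in sync so it can be emptied and destroyed:

```move
let t = aptos_std::table_with_length::new<u64, u64>();
t.add(1, 100);
assert!(t.length() == 1 && !t.empty(), 0);
assert!(t.remove(1) == 100, 1);
// destroy_empty() asserts length == 0, then hands the inner table to
// table::destroy_known_empty_unsafe (see the table.md change above).
t.destroy_empty();
```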
 
@@ -389,7 +389,7 @@ Aborts if there is no entry for key. Returns true iff table contains an entry for key. -
public fun contains<K: copy, drop, V>(table: &table_with_length::TableWithLength<K, V>, key: K): bool
+
public fun contains<K: copy, drop, V>(self: &table_with_length::TableWithLength<K, V>, key: K): bool
 
@@ -398,8 +398,8 @@ Returns true iff table contains an Implementation -
public fun contains<K: copy + drop, V>(table: &TableWithLength<K, V>, key: K): bool {
-    table::contains(&table.inner, key)
+
public fun contains<K: copy + drop, V>(self: &TableWithLength<K, V>, key: K): bool {
+    self.inner.contains(key)
 }
 
@@ -412,6 +412,51 @@ Returns true iff table contains an ## Specification + + + + +
native fun spec_len<K, V>(t: TableWithLength<K, V>): num;
+
+ + + + + + + +
native fun spec_contains<K, V>(t: TableWithLength<K, V>, k: K): bool;
+
+ + + + + + + +
native fun spec_set<K, V>(t: TableWithLength<K, V>, k: K, v: V): TableWithLength<K, V>;
+
+ + + + + + + +
native fun spec_remove<K, V>(t: TableWithLength<K, V>, k: K): TableWithLength<K, V>;
+
+ + + + + + + +
native fun spec_get<K, V>(t: TableWithLength<K, V>, k: K): V;
+
+ + + ### Struct `TableWithLength` @@ -481,7 +526,7 @@ Returns true iff table contains an ### Function `destroy_empty` -
public fun destroy_empty<K: copy, drop, V>(table: table_with_length::TableWithLength<K, V>)
+
public fun destroy_empty<K: copy, drop, V>(self: table_with_length::TableWithLength<K, V>)
 
@@ -497,7 +542,7 @@ Returns true iff table contains an ### Function `add` -
public fun add<K: copy, drop, V>(table: &mut table_with_length::TableWithLength<K, V>, key: K, val: V)
+
public fun add<K: copy, drop, V>(self: &mut table_with_length::TableWithLength<K, V>, key: K, val: V)
 
@@ -513,7 +558,7 @@ Returns true iff table contains an ### Function `borrow` -
public fun borrow<K: copy, drop, V>(table: &table_with_length::TableWithLength<K, V>, key: K): &V
+
public fun borrow<K: copy, drop, V>(self: &table_with_length::TableWithLength<K, V>, key: K): &V
 
@@ -529,7 +574,7 @@ Returns true iff table contains an ### Function `borrow_mut` -
public fun borrow_mut<K: copy, drop, V>(table: &mut table_with_length::TableWithLength<K, V>, key: K): &mut V
+
public fun borrow_mut<K: copy, drop, V>(self: &mut table_with_length::TableWithLength<K, V>, key: K): &mut V
 
@@ -545,7 +590,7 @@ Returns true iff table contains an ### Function `length` -
public fun length<K: copy, drop, V>(table: &table_with_length::TableWithLength<K, V>): u64
+
public fun length<K: copy, drop, V>(self: &table_with_length::TableWithLength<K, V>): u64
 
@@ -561,7 +606,7 @@ Returns true iff table contains an ### Function `empty` -
public fun empty<K: copy, drop, V>(table: &table_with_length::TableWithLength<K, V>): bool
+
public fun empty<K: copy, drop, V>(self: &table_with_length::TableWithLength<K, V>): bool
 
@@ -577,7 +622,7 @@ Returns true iff table contains an ### Function `borrow_mut_with_default` -
public fun borrow_mut_with_default<K: copy, drop, V: drop>(table: &mut table_with_length::TableWithLength<K, V>, key: K, default: V): &mut V
+
public fun borrow_mut_with_default<K: copy, drop, V: drop>(self: &mut table_with_length::TableWithLength<K, V>, key: K, default: V): &mut V
 
@@ -594,7 +639,7 @@ Returns true iff table contains an ### Function `upsert` -
public fun upsert<K: copy, drop, V: drop>(table: &mut table_with_length::TableWithLength<K, V>, key: K, value: V)
+
public fun upsert<K: copy, drop, V: drop>(self: &mut table_with_length::TableWithLength<K, V>, key: K, value: V)
 
@@ -610,7 +655,7 @@ Returns true iff table contains an ### Function `remove` -
public fun remove<K: copy, drop, V>(table: &mut table_with_length::TableWithLength<K, V>, key: K): V
+
public fun remove<K: copy, drop, V>(self: &mut table_with_length::TableWithLength<K, V>, key: K): V
 
@@ -626,7 +671,7 @@ Returns true iff table contains an ### Function `contains` -
public fun contains<K: copy, drop, V>(table: &table_with_length::TableWithLength<K, V>, key: K): bool
+
public fun contains<K: copy, drop, V>(self: &table_with_length::TableWithLength<K, V>, key: K): bool
 
@@ -636,49 +681,4 @@ Returns true iff table contains an
- - - - - -
native fun spec_len<K, V>(t: TableWithLength<K, V>): num;
-
- - - - - - - -
native fun spec_contains<K, V>(t: TableWithLength<K, V>, k: K): bool;
-
- - - - - - - -
native fun spec_set<K, V>(t: TableWithLength<K, V>, k: K, v: V): TableWithLength<K, V>;
-
- - - - - - - -
native fun spec_remove<K, V>(t: TableWithLength<K, V>, k: K): TableWithLength<K, V>;
-
- - - - - - - -
native fun spec_get<K, V>(t: TableWithLength<K, V>, k: K): V;
-
- - [move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/aptos-stdlib/doc/type_info.md b/aptos-move/framework/aptos-stdlib/doc/type_info.md index 1d9687e05701a..ea35286e1f1d6 100644 --- a/aptos-move/framework/aptos-stdlib/doc/type_info.md +++ b/aptos-move/framework/aptos-stdlib/doc/type_info.md @@ -15,15 +15,12 @@ - [Function `type_name`](#0x1_type_info_type_name) - [Function `chain_id_internal`](#0x1_type_info_chain_id_internal) - [Function `size_of_val`](#0x1_type_info_size_of_val) -- [Function `verify_type_of`](#0x1_type_info_verify_type_of) -- [Function `verify_type_of_generic`](#0x1_type_info_verify_type_of_generic) - [Specification](#@Specification_1) - [Function `chain_id`](#@Specification_1_chain_id) - [Function `type_of`](#@Specification_1_type_of) - [Function `type_name`](#@Specification_1_type_name) - [Function `chain_id_internal`](#@Specification_1_chain_id_internal) - [Function `size_of_val`](#@Specification_1_size_of_val) - - [Function `verify_type_of_generic`](#@Specification_1_verify_type_of_generic)
use 0x1::bcs;
@@ -93,7 +90,7 @@
 
 
 
-
public fun account_address(type_info: &type_info::TypeInfo): address
+
public fun account_address(self: &type_info::TypeInfo): address
 
@@ -102,8 +99,8 @@ Implementation -
public fun account_address(type_info: &TypeInfo): address {
-    type_info.account_address
+
public fun account_address(self: &TypeInfo): address {
+    self.account_address
 }
 
@@ -117,7 +114,7 @@ -
public fun module_name(type_info: &type_info::TypeInfo): vector<u8>
+
public fun module_name(self: &type_info::TypeInfo): vector<u8>
 
@@ -126,8 +123,8 @@ Implementation -
public fun module_name(type_info: &TypeInfo): vector<u8> {
-    type_info.module_name
+
public fun module_name(self: &TypeInfo): vector<u8> {
+    self.module_name
 }
 
@@ -141,7 +138,7 @@ -
public fun struct_name(type_info: &type_info::TypeInfo): vector<u8>
+
public fun struct_name(self: &type_info::TypeInfo): vector<u8>
 
@@ -150,8 +147,8 @@ Implementation -
public fun struct_name(type_info: &TypeInfo): vector<u8> {
-    type_info.struct_name
+
public fun struct_name(self: &TypeInfo): vector<u8> {
+    self.struct_name
 }
 
@@ -283,8 +280,7 @@ analysis of vector size dynamism.
public fun size_of_val<T>(val_ref: &T): u64 {
-    // Return vector length of vectorized BCS representation.
-    vector::length(&bcs::to_bytes(val_ref))
+    bcs::serialized_size(val_ref)
 }
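The behaviour of `size_of_val` is unchanged by the switch to `bcs::serialized_size`; only the intermediate byte vector is no longer materialized. A hypothetical illustration (not part of this patch) of the returned sizes:

```move
let x: u64 = 1;
assert!(aptos_std::type_info::size_of_val(&x) == 8, 0);   // BCS encodes a u64 in exactly 8 bytes
let v = vector[1u8, 2u8, 3u8];
assert!(aptos_std::type_info::size_of_val(&v) == 4, 1);   // 1 ULEB128 length byte + 3 element bytes
```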
 
@@ -292,77 +288,20 @@ analysis of vector size dynamism. - - -## Function `verify_type_of` - - - -
#[verify_only]
-fun verify_type_of()
-
- - - -
-Implementation - - -
fun verify_type_of() {
-    let type_info = type_of<TypeInfo>();
-    let account_address = account_address(&type_info);
-    let module_name = module_name(&type_info);
-    let struct_name = struct_name(&type_info);
-    spec {
-        assert account_address == @aptos_std;
-        assert module_name == b"type_info";
-        assert struct_name == b"TypeInfo";
-    };
-}
-
- - - -
- - - -## Function `verify_type_of_generic` - - + -
#[verify_only]
-fun verify_type_of_generic<T>()
-
+## Specification -
-Implementation + -
fun verify_type_of_generic<T>() {
-    let type_info = type_of<T>();
-    let account_address = account_address(&type_info);
-    let module_name = module_name(&type_info);
-    let struct_name = struct_name(&type_info);
-    spec {
-        assert account_address == type_of<T>().account_address;
-        assert module_name == type_of<T>().module_name;
-        assert struct_name == type_of<T>().struct_name;
-    };
-}
+
native fun spec_is_struct<T>(): bool;
 
-
- - - -## Specification - - ### Function `chain_id` @@ -451,34 +390,7 @@ analysis of vector size dynamism. -
aborts_if false;
-ensures result == spec_size_of_val<T>(val_ref);
-
- - - - - -### Function `verify_type_of_generic` - - -
#[verify_only]
-fun verify_type_of_generic<T>()
-
- - - - -
aborts_if !spec_is_struct<T>();
-
- - - - - - - -
native fun spec_is_struct<T>(): bool;
+
ensures result == spec_size_of_val<T>(val_ref);
 
diff --git a/aptos-move/framework/aptos-stdlib/sources/any.move b/aptos-move/framework/aptos-stdlib/sources/any.move index d2851b77f44b4..ffa6ba7c6dc5c 100644 --- a/aptos-move/framework/aptos-stdlib/sources/any.move +++ b/aptos-move/framework/aptos-stdlib/sources/any.move @@ -36,14 +36,14 @@ module aptos_std::any { } /// Unpack a value from the `Any` representation. This aborts if the value has not the expected type `T`. - public fun unpack(x: Any): T { - assert!(type_info::type_name() == x.type_name, error::invalid_argument(ETYPE_MISMATCH)); - from_bytes(x.data) + public fun unpack(self: Any): T { + assert!(type_info::type_name() == self.type_name, error::invalid_argument(ETYPE_MISMATCH)); + from_bytes(self.data) } /// Returns the type name of this Any - public fun type_name(x: &Any): &String { - &x.type_name + public fun type_name(self: &Any): &String { + &self.type_name } #[test_only] @@ -51,7 +51,7 @@ module aptos_std::any { #[test] fun test_any() { - assert!(unpack(pack(22)) == 22, 1); - assert!(unpack(pack(S { x: 22 })) == S { x: 22 }, 2); + assert!(pack(22).unpack::() == 22, 1); + assert!(pack(S { x: 22 }).unpack::() == S { x: 22 }, 2); } } diff --git a/aptos-move/framework/aptos-stdlib/sources/any.spec.move b/aptos-move/framework/aptos-stdlib/sources/any.spec.move index 2e55009e4bed3..47501c83d9e27 100644 --- a/aptos-move/framework/aptos-stdlib/sources/any.spec.move +++ b/aptos-move/framework/aptos-stdlib/sources/any.spec.move @@ -15,28 +15,28 @@ spec aptos_std::any { ensures [abstract] from_bcs::deserializable(result.data); } - spec unpack(x: Any): T { + spec unpack(self: Any): T { use aptos_std::from_bcs; include UnpackAbortsIf; - ensures result == from_bcs::deserialize(x.data); + ensures result == from_bcs::deserialize(self.data); } spec schema UnpackAbortsIf { use aptos_std::from_bcs; - x: Any; - aborts_if type_info::type_name() != x.type_name; - aborts_if !from_bcs::deserializable(x.data); + self: Any; + aborts_if type_info::type_name() != self.type_name; + aborts_if !from_bcs::deserializable(self.data); } spec schema UnpackRequirement { use aptos_std::from_bcs; - x: Any; - requires type_info::type_name() == x.type_name; - requires from_bcs::deserializable(x.data); + self: Any; + requires type_info::type_name() == self.type_name; + requires from_bcs::deserializable(self.data); } - spec type_name(x: &Any): &String { + spec type_name(self: &Any): &String { aborts_if false; - ensures result == x.type_name; + ensures result == self.type_name; } } diff --git a/aptos-move/framework/aptos-stdlib/sources/bcs_stream.move b/aptos-move/framework/aptos-stdlib/sources/bcs_stream.move new file mode 100644 index 0000000000000..bf5c64f8f8c89 --- /dev/null +++ b/aptos-move/framework/aptos-stdlib/sources/bcs_stream.move @@ -0,0 +1,301 @@ +/// This module enables the deserialization of BCS-formatted byte arrays into Move primitive types. +/// Deserialization Strategies: +/// - Per-Byte Deserialization: Employed for most types to ensure lower gas consumption, this method processes each byte +/// individually to match the length and type requirements of target Move types. +/// - Exception: For the `deserialize_address` function, the function-based approach from `aptos_std::from_bcs` is used +/// due to type constraints, even though it is generally more gas-intensive. +/// - This can be optimized further by introducing native vector slices. 
+/// Application: +/// - This deserializer is particularly valuable for processing BCS serialized data within Move modules, +/// especially useful for systems requiring cross-chain message interpretation or off-chain data verification. +module aptos_std::bcs_stream { + use std::error; + use std::vector; + use std::option::{Self, Option}; + use std::string::{Self, String}; + + use aptos_std::from_bcs; + + /// The data does not fit the expected format. + const EMALFORMED_DATA: u64 = 1; + /// There are not enough bytes to deserialize for the given type. + const EOUT_OF_BYTES: u64 = 2; + + struct BCSStream has drop { + /// Byte buffer containing the serialized data. + data: vector, + /// Cursor indicating the current position in the byte buffer. + cur: u64, + } + + /// Constructs a new BCSStream instance from the provided byte array. + public fun new(data: vector): BCSStream { + BCSStream { + data, + cur: 0, + } + } + + public fun has_remaining(stream: &mut BCSStream): bool { + stream.cur < stream.data.length() + } + + /// Deserializes a ULEB128-encoded integer from the stream. + /// In the BCS format, lengths of vectors are represented using ULEB128 encoding. + public fun deserialize_uleb128(stream: &mut BCSStream): u64 { + let res = 0; + let shift = 0; + + while (stream.cur < stream.data.length()) { + let byte = stream.data[stream.cur]; + stream.cur += 1; + + let val = ((byte & 0x7f) as u64); + if (((val << shift) >> shift) != val) { + abort error::invalid_argument(EMALFORMED_DATA) + }; + res |= (val << shift); + + if ((byte & 0x80) == 0) { + if (shift > 0 && val == 0) { + abort error::invalid_argument(EMALFORMED_DATA) + }; + return res + }; + + shift += 7; + if (shift > 64) { + abort error::invalid_argument(EMALFORMED_DATA) + }; + }; + + abort error::out_of_range(EOUT_OF_BYTES) + } + + /// Deserializes a `bool` value from the stream. + public fun deserialize_bool(stream: &mut BCSStream): bool { + assert!(stream.cur < stream.data.length(), error::out_of_range(EOUT_OF_BYTES)); + let byte = stream.data[stream.cur]; + stream.cur += 1; + if (byte == 0) { + false + } else if (byte == 1) { + true + } else { + abort error::invalid_argument(EMALFORMED_DATA) + } + } + + /// Deserializes an `address` value from the stream. + /// 32-byte `address` values are serialized using little-endian byte order. + /// This function utilizes the `to_address` function from the `aptos_std::from_bcs` module, + /// because the Move type system does not permit per-byte referencing of addresses. + public fun deserialize_address(stream: &mut BCSStream): address { + let data = &stream.data; + let cur = stream.cur; + + assert!(cur + 32 <= data.length(), error::out_of_range(EOUT_OF_BYTES)); + let res = from_bcs::to_address(data.slice(cur, cur + 32)); + + stream.cur = cur + 32; + res + } + + /// Deserializes a `u8` value from the stream. + /// 1-byte `u8` values are serialized using little-endian byte order. + public fun deserialize_u8(stream: &mut BCSStream): u8 { + let data = &stream.data; + let cur = stream.cur; + + assert!(cur < data.length(), error::out_of_range(EOUT_OF_BYTES)); + + let res = data[cur]; + + stream.cur = cur + 1; + res + } + + /// Deserializes a `u16` value from the stream. + /// 2-byte `u16` values are serialized using little-endian byte order. 
+ public fun deserialize_u16(stream: &mut BCSStream): u16 { + let data = &stream.data; + let cur = stream.cur; + + assert!(cur + 2 <= data.length(), error::out_of_range(EOUT_OF_BYTES)); + let res = + (data[cur] as u16) | + ((data[cur + 1] as u16) << 8) + ; + + stream.cur += 2; + res + } + + /// Deserializes a `u32` value from the stream. + /// 4-byte `u32` values are serialized using little-endian byte order. + public fun deserialize_u32(stream: &mut BCSStream): u32 { + let data = &stream.data; + let cur = stream.cur; + + assert!(cur + 4 <= data.length(), error::out_of_range(EOUT_OF_BYTES)); + let res = + (data[cur] as u32) | + ((data[cur + 1] as u32) << 8) | + ((data[cur + 2] as u32) << 16) | + ((data[cur + 3] as u32) << 24) + ; + + stream.cur += 4; + res + } + + /// Deserializes a `u64` value from the stream. + /// 8-byte `u64` values are serialized using little-endian byte order. + public fun deserialize_u64(stream: &mut BCSStream): u64 { + let data = &stream.data; + let cur = stream.cur; + + assert!(cur + 8 <= data.length(), error::out_of_range(EOUT_OF_BYTES)); + let res = + (data[cur] as u64) | + ((data[cur + 1] as u64) << 8) | + ((data[cur + 2] as u64) << 16) | + ((data[cur + 3] as u64) << 24) | + ((data[cur + 4] as u64) << 32) | + ((data[cur + 5] as u64) << 40) | + ((data[cur + 6] as u64) << 48) | + ((data[cur + 7] as u64) << 56) + ; + + stream.cur += 8; + res + } + + /// Deserializes a `u128` value from the stream. + /// 16-byte `u128` values are serialized using little-endian byte order. + public fun deserialize_u128(stream: &mut BCSStream): u128 { + let data = &stream.data; + let cur = stream.cur; + + assert!(cur + 16 <= data.length(), error::out_of_range(EOUT_OF_BYTES)); + let res = + (data[cur] as u128) | + ((data[cur + 1] as u128) << 8) | + ((data[cur + 2] as u128) << 16) | + ((data[cur + 3] as u128) << 24) | + ((data[cur + 4] as u128) << 32) | + ((data[cur + 5] as u128) << 40) | + ((data[cur + 6] as u128) << 48) | + ((data[cur + 7] as u128) << 56) | + ((data[cur + 8] as u128) << 64) | + ((data[cur + 9] as u128) << 72) | + ((data[cur + 10] as u128) << 80) | + ((data[cur + 11] as u128) << 88) | + ((data[cur + 12] as u128) << 96) | + ((data[cur + 13] as u128) << 104) | + ((data[cur + 14] as u128) << 112) | + ((data[cur + 15] as u128) << 120) + ; + + stream.cur += 16; + res + } + + /// Deserializes a `u256` value from the stream. + /// 32-byte `u256` values are serialized using little-endian byte order. 
+ public fun deserialize_u256(stream: &mut BCSStream): u256 { + let data = &stream.data; + let cur = stream.cur; + + assert!(cur + 32 <= data.length(), error::out_of_range(EOUT_OF_BYTES)); + let res = + (data[cur] as u256) | + ((data[cur + 1] as u256) << 8) | + ((data[cur + 2] as u256) << 16) | + ((data[cur + 3] as u256) << 24) | + ((data[cur + 4] as u256) << 32) | + ((data[cur + 5] as u256) << 40) | + ((data[cur + 6] as u256) << 48) | + ((data[cur + 7] as u256) << 56) | + ((data[cur + 8] as u256) << 64) | + ((data[cur + 9] as u256) << 72) | + ((data[cur + 10] as u256) << 80) | + ((data[cur + 11] as u256) << 88) | + ((data[cur + 12] as u256) << 96) | + ((data[cur + 13] as u256) << 104) | + ((data[cur + 14] as u256) << 112) | + ((data[cur + 15] as u256) << 120) | + ((data[cur + 16] as u256) << 128) | + ((data[cur + 17] as u256) << 136) | + ((data[cur + 18] as u256) << 144) | + ((data[cur + 19] as u256) << 152) | + ((data[cur + 20] as u256) << 160) | + ((data[cur + 21] as u256) << 168) | + ((data[cur + 22] as u256) << 176) | + ((data[cur + 23] as u256) << 184) | + ((data[cur + 24] as u256) << 192) | + ((data[cur + 25] as u256) << 200) | + ((data[cur + 26] as u256) << 208) | + ((data[cur + 27] as u256) << 216) | + ((data[cur + 28] as u256) << 224) | + ((data[cur + 29] as u256) << 232) | + ((data[cur + 30] as u256) << 240) | + ((data[cur + 31] as u256) << 248); + + stream.cur += 32; + res + } + + /// Deserializes a `u256` value from the stream. + public entry fun deserialize_u256_entry(data: vector, cursor: u64) { + let stream = BCSStream { + data, + cur: cursor, + }; + deserialize_u256(&mut stream); + } + + /// Deserializes an array of BCS deserializable elements from the stream. + /// First, reads the length of the vector, which is in uleb128 format. + /// After determining the length, it then reads the contents of the vector. + /// The `elem_deserializer` lambda expression is used sequentially to deserialize each element of the vector. + public inline fun deserialize_vector(stream: &mut BCSStream, elem_deserializer: |&mut BCSStream| E): vector { + let len = deserialize_uleb128(stream); + let v = vector::empty(); + + for (i in 0..len) { + v.push_back(elem_deserializer(stream)); + }; + + v + } + + /// Deserializes utf-8 `String` from the stream. + /// First, reads the length of the String, which is in uleb128 format. + /// After determining the length, it then reads the contents of the String. + public fun deserialize_string(stream: &mut BCSStream): String { + let len = deserialize_uleb128(stream); + let data = &stream.data; + let cur = stream.cur; + + assert!(cur + len <= data.length(), error::out_of_range(EOUT_OF_BYTES)); + + let res = string::utf8(data.slice(cur, cur + len)); + stream.cur = cur + len; + + res + } + + /// Deserializes `Option` from the stream. + /// First, reads a single byte representing the presence (0x01) or absence (0x00) of data. + /// After determining the presence of data, it then reads the actual data if present. + /// The `elem_deserializer` lambda expression is used to deserialize the element contained within the `Option`. 
+ public inline fun deserialize_option(stream: &mut BCSStream, elem_deserializer: |&mut BCSStream| E): Option { + let is_data = deserialize_bool(stream); + if (is_data) { + option::some(elem_deserializer(stream)) + } else { + option::none() + } + } +} diff --git a/aptos-move/framework/aptos-stdlib/sources/bcs_stream.spec.move b/aptos-move/framework/aptos-stdlib/sources/bcs_stream.spec.move new file mode 100644 index 0000000000000..d1088fd5c393e --- /dev/null +++ b/aptos-move/framework/aptos-stdlib/sources/bcs_stream.spec.move @@ -0,0 +1,5 @@ +spec aptos_std::bcs_stream { + spec module { + pragma verify = false; + } +} diff --git a/aptos-move/framework/aptos-stdlib/sources/capability.move b/aptos-move/framework/aptos-stdlib/sources/capability.move index b61c18ccc15e8..56c583134fee0 100644 --- a/aptos-move/framework/aptos-stdlib/sources/capability.move +++ b/aptos-move/framework/aptos-stdlib/sources/capability.move @@ -135,7 +135,7 @@ module aptos_std::capability { let root_addr = borrow_global>(addr).root; // double check that requester is actually registered as a delegate assert!(exists>(root_addr), error::invalid_state(EDELEGATE)); - assert!(vector::contains(&borrow_global>(root_addr).delegates, &addr), + assert!(borrow_global>(root_addr).delegates.contains(&addr), error::invalid_state(EDELEGATE)); root_addr } else { @@ -146,48 +146,48 @@ module aptos_std::capability { /// Returns the root address associated with the given capability token. Only the owner /// of the feature can do this. - public fun root_addr(cap: Cap, _feature_witness: &Feature): address { - cap.root + public fun root_addr(self: Cap, _feature_witness: &Feature): address { + self.root } /// Returns the root address associated with the given linear capability token. - public fun linear_root_addr(cap: LinearCap, _feature_witness: &Feature): address { - cap.root + public fun linear_root_addr(self: LinearCap, _feature_witness: &Feature): address { + self.root } /// Registers a delegation relation. If the relation already exists, this function does /// nothing. // TODO: explore whether this should be idempotent like now or abort - public fun delegate(cap: Cap, _feature_witness: &Feature, to: &signer) + public fun delegate(self: Cap, _feature_witness: &Feature, to: &signer) acquires CapState { let addr = signer::address_of(to); if (exists>(addr)) return; - move_to(to, CapDelegateState { root: cap.root }); - add_element(&mut borrow_global_mut>(cap.root).delegates, addr); + move_to(to, CapDelegateState { root: self.root }); + add_element(&mut borrow_global_mut>(self.root).delegates, addr); } /// Revokes a delegation relation. If no relation exists, this function does nothing. // TODO: explore whether this should be idempotent like now or abort - public fun revoke(cap: Cap, _feature_witness: &Feature, from: address) + public fun revoke(self: Cap, _feature_witness: &Feature, from: address) acquires CapState, CapDelegateState { if (!exists>(from)) return; let CapDelegateState { root: _root } = move_from>(from); - remove_element(&mut borrow_global_mut>(cap.root).delegates, &from); + remove_element(&mut borrow_global_mut>(self.root).delegates, &from); } /// Helper to remove an element from a vector. fun remove_element(v: &mut vector, x: &E) { - let (found, index) = vector::index_of(v, x); + let (found, index) = v.index_of(x); if (found) { - vector::remove(v, index); + v.remove(index); } } /// Helper to add an element to a vector. 
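// Illustrative sketch (not part of this change): the receiver-style calls adopted throughout
// this change are aliases for the corresponding std::vector functions; both spellings compile
// to the same code.
#[test_only]
module aptos_std::receiver_syntax_example {
    #[test]
    fun vector_receiver_calls() {
        let v = vector[1u64, 2, 3];
        // v.contains(&2) is equivalent to vector::contains(&v, &2).
        assert!(v.contains(&2), 0);
        let (found, idx) = v.index_of(&3);
        assert!(found && idx == 2, 1);
        // v.remove(idx) is equivalent to vector::remove(&mut v, idx).
        assert!(v.remove(idx) == 3, 2);
        assert!(v.length() == 2, 3);
    }
}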
fun add_element(v: &mut vector, x: E) { - if (!vector::contains(v, &x)) { - vector::push_back(v, x) + if (!v.contains(&x)) { + v.push_back(x) } } } diff --git a/aptos-move/framework/aptos-stdlib/sources/capability.spec.move b/aptos-move/framework/aptos-stdlib/sources/capability.spec.move index d7c6ba949f0a3..ef9af60e38197 100644 --- a/aptos-move/framework/aptos-stdlib/sources/capability.spec.move +++ b/aptos-move/framework/aptos-stdlib/sources/capability.spec.move @@ -44,14 +44,14 @@ spec aptos_std::capability { aborts_if !spec_has_delegate_cap(addr) && !spec_has_cap(addr); } - spec delegate(cap: Cap, _feature_witness: &Feature, to: &signer) { + spec delegate(self: Cap, _feature_witness: &Feature, to: &signer) { let addr = signer::address_of(to); ensures spec_has_delegate_cap(addr); - ensures !old(spec_has_delegate_cap(addr)) ==> global>(addr).root == cap.root; - ensures !old(spec_has_delegate_cap(addr)) ==> vector::spec_contains(spec_delegates(cap.root), addr); + ensures !old(spec_has_delegate_cap(addr)) ==> global>(addr).root == self.root; + ensures !old(spec_has_delegate_cap(addr)) ==> vector::spec_contains(spec_delegates(self.root), addr); } - spec revoke(cap: Cap, _feature_witness: &Feature, from: address) { + spec revoke(self: Cap, _feature_witness: &Feature, from: address) { ensures !spec_has_delegate_cap(from); // TODO: this cannot be proved. See issue #7422 // ensures old(spec_has_delegate_cap(from)) diff --git a/aptos-move/framework/aptos-stdlib/sources/comparator.move b/aptos-move/framework/aptos-stdlib/sources/comparator.move index 869b486b4ba5b..3467433d4d7c2 100644 --- a/aptos-move/framework/aptos-stdlib/sources/comparator.move +++ b/aptos-move/framework/aptos-stdlib/sources/comparator.move @@ -1,7 +1,6 @@ /// Provides a framework for comparing two elements module aptos_std::comparator { use std::bcs; - use std::vector; const EQUAL: u8 = 0; const SMALLER: u8 = 1; @@ -11,16 +10,16 @@ module aptos_std::comparator { inner: u8, } - public fun is_equal(result: &Result): bool { - result.inner == EQUAL + public fun is_equal(self: &Result): bool { + self.inner == EQUAL } - public fun is_smaller_than(result: &Result): bool { - result.inner == SMALLER + public fun is_smaller_than(self: &Result): bool { + self.inner == SMALLER } - public fun is_greater_than(result: &Result): bool { - result.inner == GREATER + public fun is_greater_than(self: &Result): bool { + self.inner == GREATER } // Performs a comparison of two types after BCS serialization. 
@@ -36,21 +35,21 @@ module aptos_std::comparator { // Performs a comparison of two vectors or byte vectors public fun compare_u8_vector(left: vector, right: vector): Result { - let left_length = vector::length(&left); - let right_length = vector::length(&right); + let left_length = left.length(); + let right_length = right.length(); let idx = 0; while (idx < left_length && idx < right_length) { - let left_byte = *vector::borrow(&left, idx); - let right_byte = *vector::borrow(&right, idx); + let left_byte = left[idx]; + let right_byte = right[idx]; if (left_byte < right_byte) { return Result { inner: SMALLER } } else if (left_byte > right_byte) { return Result { inner: GREATER } }; - idx = idx + 1; + idx += 1; }; if (left_length < right_length) { @@ -70,18 +69,18 @@ module aptos_std::comparator { let value1 = string::utf8(b"beta"); let value2 = string::utf8(b"betaa"); - assert!(is_equal(&compare(&value0, &value0)), 0); - assert!(is_equal(&compare(&value1, &value1)), 1); - assert!(is_equal(&compare(&value2, &value2)), 2); + assert!(compare(&value0, &value0).is_equal(), 0); + assert!(compare(&value1, &value1).is_equal(), 1); + assert!(compare(&value2, &value2).is_equal(), 2); - assert!(is_greater_than(&compare(&value0, &value1)), 3); - assert!(is_smaller_than(&compare(&value1, &value0)), 4); + assert!(compare(&value0, &value1).is_greater_than(), 3); + assert!(compare(&value1, &value0).is_smaller_than(), 4); - assert!(is_smaller_than(&compare(&value0, &value2)), 5); - assert!(is_greater_than(&compare(&value2, &value0)), 6); + assert!(compare(&value0, &value2).is_smaller_than(), 5); + assert!(compare(&value2, &value0).is_greater_than(), 6); - assert!(is_smaller_than(&compare(&value1, &value2)), 7); - assert!(is_greater_than(&compare(&value2, &value1)), 8); + assert!(compare(&value1, &value2).is_smaller_than(), 7); + assert!(compare(&value2, &value1).is_greater_than(), 8); } #[test] @@ -91,11 +90,11 @@ module aptos_std::comparator { let value0: u128 = 1; let value1: u128 = 256; - assert!(is_equal(&compare(&value0, &value0)), 0); - assert!(is_equal(&compare(&value1, &value1)), 1); + assert!(compare(&value0, &value0).is_equal(), 0); + assert!(compare(&value1, &value1).is_equal(), 1); - assert!(is_smaller_than(&compare(&value0, &value1)), 2); - assert!(is_greater_than(&compare(&value1, &value0)), 3); + assert!(compare(&value0, &value1).is_smaller_than(), 2); + assert!(compare(&value1, &value0).is_greater_than(), 3); } #[test] @@ -104,18 +103,18 @@ module aptos_std::comparator { let value1: u128 = 152; let value2: u128 = 511; // 0x1ff - assert!(is_equal(&compare(&value0, &value0)), 0); - assert!(is_equal(&compare(&value1, &value1)), 1); - assert!(is_equal(&compare(&value2, &value2)), 2); + assert!(compare(&value0, &value0).is_equal(), 0); + assert!(compare(&value1, &value1).is_equal(), 1); + assert!(compare(&value2, &value2).is_equal(), 2); - assert!(is_smaller_than(&compare(&value0, &value1)), 2); - assert!(is_greater_than(&compare(&value1, &value0)), 3); + assert!(compare(&value0, &value1).is_smaller_than(), 2); + assert!(compare(&value1, &value0).is_greater_than(), 3); - assert!(is_smaller_than(&compare(&value0, &value2)), 3); - assert!(is_greater_than(&compare(&value2, &value0)), 4); + assert!(compare(&value0, &value2).is_smaller_than(), 3); + assert!(compare(&value2, &value0).is_greater_than(), 4); - assert!(is_smaller_than(&compare(&value1, &value2)), 5); - assert!(is_greater_than(&compare(&value2, &value1)), 6); + assert!(compare(&value1, &value2).is_smaller_than(), 5); + 
assert!(compare(&value2, &value1).is_greater_than(), 6); } #[test_only] @@ -127,16 +126,8 @@ module aptos_std::comparator { #[test] public fun test_complex() { - let value0_0 = vector::empty(); - vector::push_back(&mut value0_0, 10); - vector::push_back(&mut value0_0, 9); - vector::push_back(&mut value0_0, 5); - - let value0_1 = vector::empty(); - vector::push_back(&mut value0_1, 10); - vector::push_back(&mut value0_1, 9); - vector::push_back(&mut value0_1, 5); - vector::push_back(&mut value0_1, 1); + let value0_0 = vector[10, 9, 5]; + let value0_1 = vector[10, 9, 5, 1]; let base = Complex { value0: value0_0, @@ -162,12 +153,12 @@ module aptos_std::comparator { value2: 42, }; - assert!(is_equal(&compare(&base, &base)), 0); - assert!(is_smaller_than(&compare(&base, &other_0)), 1); - assert!(is_greater_than(&compare(&other_0, &base)), 2); - assert!(is_smaller_than(&compare(&base, &other_1)), 3); - assert!(is_greater_than(&compare(&other_1, &base)), 4); - assert!(is_smaller_than(&compare(&base, &other_2)), 5); - assert!(is_greater_than(&compare(&other_2, &base)), 6); + assert!(compare(&base, &base).is_equal(), 0); + assert!(compare(&base, &other_0).is_smaller_than(), 1); + assert!(compare(&other_0, &base).is_greater_than(), 2); + assert!(compare(&base, &other_1).is_smaller_than(), 3); + assert!(compare(&other_1, &base).is_greater_than(), 4); + assert!(compare(&base, &other_2).is_smaller_than(), 5); + assert!(compare(&other_2, &base).is_greater_than(), 6); } } diff --git a/aptos-move/framework/aptos-stdlib/sources/comparator.spec.move b/aptos-move/framework/aptos-stdlib/sources/comparator.spec.move index 0c14b444ad6e8..5e7d8e96a28a1 100644 --- a/aptos-move/framework/aptos-stdlib/sources/comparator.spec.move +++ b/aptos-move/framework/aptos-stdlib/sources/comparator.spec.move @@ -3,21 +3,21 @@ spec aptos_std::comparator { invariant inner == EQUAL || inner == SMALLER || inner == GREATER; } - spec is_equal(result: &Result): bool { + spec is_equal(self: &Result): bool { aborts_if false; - let res = result; + let res = self; ensures result == (res.inner == EQUAL); } - spec is_smaller_than(result: &Result): bool { + spec is_smaller_than(self: &Result): bool { aborts_if false; - let res = result; + let res = self; ensures result == (res.inner == SMALLER); } - spec is_greater_than(result: &Result): bool { + spec is_greater_than(self: &Result): bool { aborts_if false; - let res = result; + let res = self; ensures result == (res.inner == GREATER); } diff --git a/aptos-move/framework/aptos-stdlib/sources/copyable_any.move b/aptos-move/framework/aptos-stdlib/sources/copyable_any.move index b12303a3f9237..2c1175f9251f4 100644 --- a/aptos-move/framework/aptos-stdlib/sources/copyable_any.move +++ b/aptos-move/framework/aptos-stdlib/sources/copyable_any.move @@ -24,14 +24,14 @@ module aptos_std::copyable_any { } /// Unpack a value from the `Any` representation. This aborts if the value has not the expected type `T`. 
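// Illustrative sketch (not part of this change): `Any` carries a value together with its type
// name, and `unpack` only succeeds when it is asked for that same type.
#[test_only]
module aptos_std::copyable_any_example {
    use aptos_std::copyable_any;

    #[test]
    fun pack_then_unpack() {
        let any = copyable_any::pack(42u64);
        // Unpacking with the original type succeeds; requesting a different
        // type would abort with ETYPE_MISMATCH.
        assert!(copyable_any::unpack<u64>(any) == 42, 0);
    }
}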
- public fun unpack(x: Any): T { - assert!(type_info::type_name() == x.type_name, error::invalid_argument(ETYPE_MISMATCH)); - from_bytes(x.data) + public fun unpack(self: Any): T { + assert!(type_info::type_name() == self.type_name, error::invalid_argument(ETYPE_MISMATCH)); + from_bytes(self.data) } /// Returns the type name of this Any - public fun type_name(x: &Any): &String { - &x.type_name + public fun type_name(self: &Any): &String { + &self.type_name } #[test_only] @@ -39,7 +39,7 @@ module aptos_std::copyable_any { #[test] fun test_any() { - assert!(unpack(pack(22)) == 22, 1); - assert!(unpack(pack(S { x: 22 })) == S { x: 22 }, 2); + assert!(pack(22).unpack::() == 22, 1); + assert!(pack(S { x: 22 }).unpack::() == S { x: 22 }, 2); } } diff --git a/aptos-move/framework/aptos-stdlib/sources/copyable_any.spec.move b/aptos-move/framework/aptos-stdlib/sources/copyable_any.spec.move index d1d64a81a4fac..f9a27e4c33838 100644 --- a/aptos-move/framework/aptos-stdlib/sources/copyable_any.spec.move +++ b/aptos-move/framework/aptos-stdlib/sources/copyable_any.spec.move @@ -16,21 +16,21 @@ spec aptos_std::copyable_any { ensures [abstract] from_bcs::deserializable(result.data); } - spec unpack(x: Any): T { + spec unpack(self: Any): T { use aptos_std::from_bcs; include UnpackAbortsIf; - ensures result == from_bcs::deserialize(x.data); + ensures result == from_bcs::deserialize(self.data); } spec schema UnpackAbortsIf { use aptos_std::from_bcs; - x: Any; - aborts_if type_info::type_name() != x.type_name; - aborts_if !from_bcs::deserializable(x.data); + self: Any; + aborts_if type_info::type_name() != self.type_name; + aborts_if !from_bcs::deserializable(self.data); } - spec type_name(x: &Any): &String { + spec type_name(self: &Any): &String { aborts_if false; - ensures result == x.type_name; + ensures result == self.type_name; } } diff --git a/aptos-move/framework/aptos-stdlib/sources/cryptography/bls12381.move b/aptos-move/framework/aptos-stdlib/sources/cryptography/bls12381.move index de7d05ad8b18b..83a68341ba83f 100644 --- a/aptos-move/framework/aptos-stdlib/sources/cryptography/bls12381.move +++ b/aptos-move/framework/aptos-stdlib/sources/cryptography/bls12381.move @@ -194,7 +194,7 @@ module aptos_std::bls12381 { /// Deserializes an aggregate-or-multi-signature from 96 bytes. public fun aggr_or_multi_signature_from_bytes(bytes: vector): AggrOrMultiSignature { - assert!(std::vector::length(&bytes) == SIGNATURE_SIZE, std::error::invalid_argument(EWRONG_SIZE)); + assert!(bytes.length() == SIGNATURE_SIZE, std::error::invalid_argument(EWRONG_SIZE)); AggrOrMultiSignature { bytes @@ -267,41 +267,37 @@ module aptos_std::bls12381 { #[test_only] /// Generates a multi-signature for a message with multiple signing keys. public fun multi_sign_arbitrary_bytes(signing_keys: &vector, message: vector): AggrOrMultiSignature { - let n = std::vector::length(signing_keys); + let n = signing_keys.length(); let sigs = vector[]; - let i: u64 = 0; - while (i < n) { - let sig = sign_arbitrary_bytes(std::vector::borrow(signing_keys, i), message); - std::vector::push_back(&mut sigs, sig); - i = i + 1; + for (i in 0..n) { + let sig = sign_arbitrary_bytes(&signing_keys[i], message); + sigs.push_back(sig); }; let multisig = aggregate_signatures(sigs); - option::extract(&mut multisig) + multisig.extract() } #[test_only] /// Generates an aggregated signature over all messages in messages, where signing_keys[i] signs messages[i]. 
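// Illustrative sketch (not part of this change): the end-to-end multi-signature flow exercised
// by the tests below, where every signer signs the same message. Key generation and signing
// helpers are #[test_only].
#[test_only]
module aptos_std::bls12381_multisig_example {
    use aptos_std::bls12381;

    #[test]
    fun sign_aggregate_verify() {
        let (sk1, pk1) = bls12381::generate_keys();
        let (sk2, pk2) = bls12381::generate_keys();
        let sks = vector[sk1, sk2];
        let pks = vector[pk1, pk2];
        // One multi-signature over the same message from both signers.
        let multisig = bls12381::multi_sign_arbitrary_bytes(&sks, b"hello aptos");
        // Verification needs the aggregate public key of the same signer set.
        let apk = bls12381::aggregate_pubkeys(pks);
        assert!(bls12381::verify_multisignature(&multisig, &apk, b"hello aptos"), 0);
    }
}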
public fun aggr_sign_arbitrary_bytes(signing_keys: &vector, messages: &vector>): AggrOrMultiSignature { - let signing_key_count = std::vector::length(signing_keys); - let message_count = std::vector::length(messages); + let signing_key_count = signing_keys.length(); + let message_count = messages.length(); assert!(signing_key_count == message_count, invalid_argument(E_NUM_SIGNERS_MUST_EQ_NUM_MESSAGES)); let sigs = vector[]; - let i: u64 = 0; - while (i < signing_key_count) { - let sig = sign_arbitrary_bytes(std::vector::borrow(signing_keys, i), *std::vector::borrow(messages, i)); - std::vector::push_back(&mut sigs, sig); - i = i + 1; + for (i in 0..signing_key_count) { + let sig = sign_arbitrary_bytes(&signing_keys[i], messages[i]); + sigs.push_back(sig); }; let aggr_sig = aggregate_signatures(sigs); - option::extract(&mut aggr_sig) + aggr_sig.extract() } #[test_only] /// Returns a mauled copy of a byte array. public fun maul_bytes(bytes: &vector): vector { let new_bytes = *bytes; - let first_byte = std::vector::borrow_mut(&mut new_bytes, 0); - *first_byte = *first_byte ^ 0xff; + let first_byte = &mut new_bytes[0]; + *first_byte ^= 0xff; new_bytes } @@ -510,9 +506,15 @@ module aptos_std::bls12381 { #[test] fun test_pubkey_validation() { // test low order points (in group for PK) - assert!(option::is_none(&public_key_from_bytes(x"ae3cd9403b69c20a0d455fd860e977fe6ee7140a7f091f26c860f2caccd3e0a7a7365798ac10df776675b3a67db8faa0")), 1); - assert!(option::is_none(&public_key_from_bytes(x"928d4862a40439a67fd76a9c7560e2ff159e770dcf688ff7b2dd165792541c88ee76c82eb77dd6e9e72c89cbf1a56a68")), 1); - assert!(option::is_some(&public_key_from_bytes(x"b3e4921277221e01ed71284be5e3045292b26c7f465a6fcdba53ee47edd39ec5160da3b229a73c75671024dcb36de091")), 1); + assert!( + public_key_from_bytes(x"ae3cd9403b69c20a0d455fd860e977fe6ee7140a7f091f26c860f2caccd3e0a7a7365798ac10df776675b3a67db8faa0").is_none( + ), 1); + assert!( + public_key_from_bytes(x"928d4862a40439a67fd76a9c7560e2ff159e770dcf688ff7b2dd165792541c88ee76c82eb77dd6e9e72c89cbf1a56a68").is_none( + ), 1); + assert!( + public_key_from_bytes(x"b3e4921277221e01ed71284be5e3045292b26c7f465a6fcdba53ee47edd39ec5160da3b229a73c75671024dcb36de091").is_some( + ), 1); } #[test] @@ -544,18 +546,15 @@ module aptos_std::bls12381 { AggrPublicKeysWithPoP { bytes: x"b53df1cfee2168f59e5792e710bf22928dc0553e6531dae5c7656c0a66fc12cb82fbb04863938c953dc901a5a79cc0f3" }, ]; - let i = 0; let accum_pk = std::vector::empty(); - while (i < std::vector::length(&pks)) { - std::vector::push_back(&mut accum_pk, *std::vector::borrow(&pks, i)); + for (i in 0..pks.length()) { + accum_pk.push_back(pks[i]); let apk = aggregate_pubkeys(accum_pk); // Make sure PKs were aggregated correctly - assert!(apk == *std::vector::borrow(&agg_pks, i), 1); + assert!(apk == agg_pks[i], 1); assert!(validate_pubkey_internal(apk.bytes), 1); - - i = i + 1; }; } @@ -563,14 +562,14 @@ module aptos_std::bls12381 { fun test_pubkey_validation_against_invalid_keys() { let (_sk, pk) = generate_keys(); let pk_bytes = public_key_with_pop_to_bytes(&pk); - assert!(option::is_some(&public_key_from_bytes(pk_bytes)), 1); - assert!(option::is_none(&public_key_from_bytes(maul_bytes(&pk_bytes))), 1); + assert!(public_key_from_bytes(pk_bytes).is_some(), 1); + assert!(public_key_from_bytes(maul_bytes(&pk_bytes)).is_none(), 1); } #[test] fun test_signature_aggregation() { // First, test empty aggregation - assert!(option::is_none(&mut aggregate_signatures(vector[])), 1); + assert!(aggregate_signatures(vector[]).is_none(), 1); // 
Second, try some test-cases generated by running the following command in `crates/aptos-crypto`: // $ cargo test -- sample_aggregate_sigs --nocapture --include-ignored @@ -593,24 +592,21 @@ module aptos_std::bls12381 { AggrOrMultiSignature { bytes: x"8284e4e3983f29cb45020c3e2d89066df2eae533a01cb6ca2c4d466b5e02dd22467f59640aa120db2b9cc49e931415c3097e3d54ff977fd9067b5bc6cfa1c885d9d8821aef20c028999a1d97e783ae049d8fa3d0bbac36ce4ca8e10e551d3461" }, ]; - let i = 0; let accum_sigs = std::vector::empty(); - while (i < std::vector::length(&sigs)) { - std::vector::push_back(&mut accum_sigs, *std::vector::borrow(&sigs, i)); + for (i in 0..sigs.length()) { + accum_sigs.push_back(sigs[i]); - let multisig = option::extract(&mut aggregate_signatures(accum_sigs)); + let multisig = aggregate_signatures(accum_sigs).extract(); // Make sure sigs were aggregated correctly - assert!(multisig == *std::vector::borrow(&multisigs, i), 1); + assert!(multisig == multisigs[i], 1); assert!(signature_subgroup_check_internal(multisig.bytes), 1); - - i = i + 1; }; } #[test] fun test_empty_signature_aggregation() { - assert!(option::is_none(&mut aggregate_signatures(vector[])), 1); + assert!(aggregate_signatures(vector[]).is_none(), 1); } #[test] @@ -643,18 +639,15 @@ module aptos_std::bls12381 { AggrOrMultiSignature { bytes: x"b627b2cfd8ae59dcf5e58cc6c230ae369985fd096e1bc3be38da5deafcbed7d939f07cccc75383539940c56c6b6453db193f563f5b6e4fe54915afd9e1baea40a297fa7eda74abbdcd4cc5c667d6db3b9bd265782f7693798894400f2beb4637" }, ]; - let i = 0; let accum_pk = std::vector::empty(); - while (i < std::vector::length(&pks)) { - std::vector::push_back(&mut accum_pk, *std::vector::borrow(&pks, i)); + for (i in 0..pks.length()) { + accum_pk.push_back(pks[i]); let apk = aggregate_pubkeys(accum_pk); - assert!(apk == *std::vector::borrow(&agg_pks, i), 1); - - assert!(verify_multisignature(std::vector::borrow(&multisigs, i), &apk, b"Hello, Aptoverse!"), 1); + assert!(apk == agg_pks[i], 1); - i = i + 1; + assert!(verify_multisignature(&multisigs[i], &apk, b"Hello, Aptoverse!"), 1); }; } @@ -667,12 +660,10 @@ module aptos_std::bls12381 { // Generate key pairs. let signing_keys = vector[]; let public_keys = vector[]; - let i = 0; - while (i < signer_count) { + for (i in 0..signer_count) { let (sk, pk) = generate_keys(); - std::vector::push_back(&mut signing_keys, sk); - std::vector::push_back(&mut public_keys, pk); - i = i + 1; + signing_keys.push_back(sk); + public_keys.push_back(pk); }; // Generate multi-signature. @@ -687,18 +678,16 @@ module aptos_std::bls12381 { // Also test signature aggregation. 
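// Illustrative sketch (not part of this change): the Option receiver calls used throughout
// these tests; `extract` takes the value out and leaves `none` behind.
#[test_only]
module aptos_std::option_receiver_example {
    use std::option;

    #[test]
    fun extract_and_query() {
        let opt = option::some(7u64);
        assert!(opt.is_some(), 0);
        // Equivalent to option::extract(&mut opt).
        assert!(opt.extract() == 7, 1);
        assert!(opt.is_none(), 2);
    }
}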
let signatures = vector[]; - let i = 0; - while (i < signer_count) { - let sk = std::vector::borrow(&signing_keys, i); + for (i in 0..signer_count) { + let sk = signing_keys.borrow(i); let sig = sign_arbitrary_bytes(sk, msg); - std::vector::push_back(&mut signatures, sig); - i = i + 1; + signatures.push_back(sig); }; - let aggregated_signature = option::extract(&mut aggregate_signatures(signatures)); + let aggregated_signature = aggregate_signatures(signatures).extract(); assert!(aggr_or_multi_signature_subgroup_check(&aggregated_signature), 1); assert!(aggr_or_multi_signature_to_bytes(&aggregated_signature) == aggr_or_multi_signature_to_bytes(&multisig), 1); - signer_count = signer_count + 1; + signer_count += 1; } } @@ -770,15 +759,15 @@ module aptos_std::bls12381 { let i = 0; let msg_subset = std::vector::empty>(); let pk_subset = std::vector::empty(); - while (i < std::vector::length(&pks)) { - let aggsig = *std::vector::borrow(&aggsigs, i); + while (i < pks.length()) { + let aggsig = aggsigs[i]; - std::vector::push_back(&mut pk_subset, *std::vector::borrow(&pks, i)); - std::vector::push_back(&mut msg_subset, *std::vector::borrow(&msgs, i)); + pk_subset.push_back(pks[i]); + msg_subset.push_back(msgs[i]); assert!(verify_aggregate_signature(&aggsig, pk_subset, msg_subset), 1); - i = i + 1; + i += 1; }; } @@ -794,23 +783,23 @@ module aptos_std::bls12381 { let i = 0; while (i < signer_count) { let (sk, pk) = generate_keys(); - std::vector::push_back(&mut signing_keys, sk); - std::vector::push_back(&mut public_keys, pk); + signing_keys.push_back(sk); + public_keys.push_back(pk); let msg: vector = vector[104, 101, 108, 108, 111, 32, 97, 112, 116, 111, 115, 32, 117, 115, 101, 114, 32, 48+(i as u8)]; //"hello aptos user {i}" - std::vector::push_back(&mut messages, msg); - i = i + 1; + messages.push_back(msg); + i += 1; }; // Maul messages and public keys. - let mauled_public_keys = vector[maul_public_key_with_pop(std::vector::borrow(&public_keys, 0))]; - let mauled_messages = vector[maul_bytes(std::vector::borrow(&messages, 0))]; + let mauled_public_keys = vector[maul_public_key_with_pop(public_keys.borrow(0))]; + let mauled_messages = vector[maul_bytes(messages.borrow(0))]; let i = 1; while (i < signer_count) { - let pk = std::vector::borrow(&public_keys, i); - let msg = std::vector::borrow(&messages, i); - std::vector::push_back(&mut mauled_public_keys, *pk); - std::vector::push_back(&mut mauled_messages, *msg); - i = i + 1; + let pk = public_keys.borrow(i); + let msg = messages.borrow(i); + mauled_public_keys.push_back(*pk); + mauled_messages.push_back(*msg); + i += 1; }; // Generate aggregated signature. 
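// Illustrative sketch (not part of this change): the aggregate-signature variant tested below,
// where each signer signs a different message; the signing helpers are #[test_only].
#[test_only]
module aptos_std::bls12381_aggsig_example {
    use aptos_std::bls12381;

    #[test]
    fun aggregate_sign_and_verify() {
        let (sk1, pk1) = bls12381::generate_keys();
        let (sk2, pk2) = bls12381::generate_keys();
        let sks = vector[sk1, sk2];
        let msgs = vector[b"first message", b"second message"];
        // signing_keys[i] signs messages[i].
        let aggsig = bls12381::aggr_sign_arbitrary_bytes(&sks, &msgs);
        assert!(bls12381::verify_aggregate_signature(&aggsig, vector[pk1, pk2], msgs), 0);
    }
}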
@@ -826,16 +815,16 @@ module aptos_std::bls12381 { let signatures = vector[]; let i = 0; while (i < signer_count) { - let sk = std::vector::borrow(&signing_keys, i); - let msg = std::vector::borrow(&messages, i); + let sk = signing_keys.borrow(i); + let msg = messages.borrow(i); let sig = sign_arbitrary_bytes(sk, *msg); - std::vector::push_back(&mut signatures, sig); - i = i + 1; + signatures.push_back(sig); + i += 1; }; - let aggrsig_another = option::extract(&mut aggregate_signatures(signatures)); + let aggrsig_another = aggregate_signatures(signatures).extract(); assert!(aggr_or_multi_signature_to_bytes(&aggrsig_another) == aggr_or_multi_signature_to_bytes(&aggrsig), 1); - signer_count = signer_count + 1; + signer_count += 1; } } @@ -852,12 +841,14 @@ module aptos_std::bls12381 { // First, test signatures that verify let ok = verify_normal_signature( &signature_from_bytes(x"b01ce4632e94d8c611736e96aa2ad8e0528a02f927a81a92db8047b002a8c71dc2d6bfb94729d0973790c10b6ece446817e4b7543afd7ca9a17c75de301ae835d66231c26a003f11ae26802b98d90869a9e73788c38739f7ac9d52659e1f7cf7"), - &option::extract(&mut public_key_from_bytes(x"94209a296b739577cb076d3bfb1ca8ee936f29b69b7dae436118c4dd1cc26fd43dcd16249476a006b8b949bf022a7858")), + &public_key_from_bytes(x"94209a296b739577cb076d3bfb1ca8ee936f29b69b7dae436118c4dd1cc26fd43dcd16249476a006b8b949bf022a7858").extract( + ), message, ); assert!(ok == true, 1); - let pk = option::extract(&mut public_key_from_bytes(x"94209a296b739577cb076d3bfb1ca8ee936f29b69b7dae436118c4dd1cc26fd43dcd16249476a006b8b949bf022a7858")); + let pk = public_key_from_bytes(x"94209a296b739577cb076d3bfb1ca8ee936f29b69b7dae436118c4dd1cc26fd43dcd16249476a006b8b949bf022a7858").extract( + ); let pk_with_pop = PublicKeyWithPoP { bytes: pk.bytes }; let ok = verify_signature_share( @@ -885,12 +876,12 @@ module aptos_std::bls12381 { ]; let i = 0; - while (i < std::vector::length(&pks)) { - let sig = std::vector::borrow(&sigs, i); - let pk = *std::vector::borrow(&pks, i); - let msg = *std::vector::borrow(&messages, i); + while (i < pks.length()) { + let sig = sigs.borrow(i); + let pk = pks[i]; + let msg = messages[i]; - let pk = option::extract(&mut public_key_from_bytes(pk)); + let pk = public_key_from_bytes(pk).extract(); let notok = verify_normal_signature( sig, @@ -906,7 +897,7 @@ module aptos_std::bls12381 { ); assert!(notok == false, 1); - i = i + 1; + i += 1; } } @@ -950,27 +941,27 @@ module aptos_std::bls12381 { proof_of_possession_from_bytes(x"8899b294f3c066e6dfb59bc0843265a1ccd6afc8f0f38a074d45ded8799c39d25ee0376cd6d6153b0d4d2ff8655e578b140254f1287b9e9df4e2aecc5b049d8556a4ab07f574df68e46348fd78e5298b7913377cf5bb3cf4796bfc755902bfdd"), ]; - assert!(std::vector::length(&pks) == std::vector::length(&pops), 1); + assert!(pks.length() == pops.length(), 1); let i = 0; - while (i < std::vector::length(&pks)) { - let opt_pk = public_key_from_bytes_with_pop(*std::vector::borrow(&pks, i), std::vector::borrow(&pops, i)); - assert!(option::is_some(&opt_pk), 1); + while (i < pks.length()) { + let opt_pk = public_key_from_bytes_with_pop(pks[i], pops.borrow(i)); + assert!(opt_pk.is_some(), 1); - i = i + 1; + i += 1; }; // assert first PK's PoP does not verify against modifed PK' = 0xa0 | PK[1:] let opt_pk = public_key_from_bytes_with_pop( x"a08864c91ae7a9998b3f5ee71f447840864e56d79838e4785ff5126c51480198df3d972e1e0348c6da80d396983e42d7", 
&proof_of_possession_from_bytes(x"ab42afff92510034bf1232a37a0d31bc8abfc17e7ead9170d2d100f6cf6c75ccdcfedbd31699a112b4464a06fd636f3f190595863677d660b4c5d922268ace421f9e86e3a054946ee34ce29e1f88c1a10f27587cf5ec528d65ba7c0dc4863364")); - assert!(option::is_none(&opt_pk), 1); + assert!(opt_pk.is_none(), 1); // assert first PK's PoP does not verify if modifed as pop' = 0xb0 | pop[1:] let opt_pk = public_key_from_bytes_with_pop( x"808864c91ae7a9998b3f5ee71f447840864e56d79838e4785ff5126c51480198df3d972e1e0348c6da80d396983e42d7", &proof_of_possession_from_bytes(x"bb42afff92510034bf1232a37a0d31bc8abfc17e7ead9170d2d100f6cf6c75ccdcfedbd31699a112b4464a06fd636f3f190595863677d660b4c5d922268ace421f9e86e3a054946ee34ce29e1f88c1a10f27587cf5ec528d65ba7c0dc4863364")); - assert!(option::is_none(&opt_pk), 1); + assert!(opt_pk.is_none(), 1); } #[test] @@ -978,8 +969,8 @@ module aptos_std::bls12381 { let (sk, pk) = generate_keys(); let pk_bytes = public_key_with_pop_to_bytes(&pk); let pop = generate_proof_of_possession(&sk); - assert!(option::is_some(&public_key_from_bytes_with_pop(pk_bytes, &pop)), 1); - assert!(option::is_none(&public_key_from_bytes_with_pop(pk_bytes, &maul_proof_of_possession(&pop))), 1); - assert!(option::is_none(&public_key_from_bytes_with_pop(maul_bytes(&pk_bytes), &pop)), 1); + assert!(public_key_from_bytes_with_pop(pk_bytes, &pop).is_some(), 1); + assert!(public_key_from_bytes_with_pop(pk_bytes, &maul_proof_of_possession(&pop)).is_none(), 1); + assert!(public_key_from_bytes_with_pop(maul_bytes(&pk_bytes), &pop).is_none(), 1); } } diff --git a/aptos-move/framework/aptos-stdlib/sources/cryptography/bls12381_algebra.move b/aptos-move/framework/aptos-stdlib/sources/cryptography/bls12381_algebra.move index 5fb99beb93c1b..a3cee9d28fc6c 100644 --- a/aptos-move/framework/aptos-stdlib/sources/cryptography/bls12381_algebra.move +++ b/aptos-move/framework/aptos-stdlib/sources/cryptography/bls12381_algebra.move @@ -254,8 +254,8 @@ module aptos_std::bls12381_algebra { fun rand_vector(num: u64): vector> { let elements = vector[]; while (num > 0) { - std::vector::push_back(&mut elements, rand_insecure()); - num = num - 1; + elements.push_back(rand_insecure()); + num -= 1; }; elements } @@ -273,10 +273,10 @@ module aptos_std::bls12381_algebra { assert!(FQ12_VAL_0_SERIALIZED == serialize(&val_0), 1); assert!(FQ12_VAL_1_SERIALIZED == serialize(&val_1), 1); let val_7 = from_u64(7); - let val_7_another = std::option::extract(&mut deserialize(&FQ12_VAL_7_SERIALIZED)); + let val_7_another = deserialize(&FQ12_VAL_7_SERIALIZED).extract(); assert!(eq(&val_7, &val_7_another), 1); assert!(FQ12_VAL_7_SERIALIZED == serialize(&val_7), 1); - assert!(std::option::is_none(&deserialize(&x"ffff")), 1); + assert!(deserialize(&x"ffff").is_none(), 1); // Negation. let val_minus_7 = neg(&val_7); @@ -296,19 +296,19 @@ module aptos_std::bls12381_algebra { // division. let val_0 = from_u64(0); - assert!(eq(&val_7, &std::option::extract(&mut div(&val_63, &val_9))), 1); - assert!(std::option::is_none(&div(&val_63, &val_0)), 1); + assert!(eq(&val_7, &div(&val_63, &val_9).extract()), 1); + assert!(div(&val_63, &val_0).is_none(), 1); // Inversion. assert!(eq(&val_minus_7, &neg(&val_7)), 1); - assert!(std::option::is_none(&inv(&val_0)), 1); + assert!(inv(&val_0).is_none(), 1); // Squaring. let val_x = rand_insecure(); assert!(eq(&mul(&val_x, &val_x), &sqr(&val_x)), 1); // Downcasting. 
- assert!(eq(&zero(), &std::option::extract(&mut downcast(&val_1))), 1); + assert!(eq(&zero(), &downcast(&val_1).extract()), 1); } #[test_only] @@ -342,49 +342,67 @@ module aptos_std::bls12381_algebra { // Serialization/deserialization. assert!(G1_GENERATOR_SERIALIZED_UNCOMP == serialize(&generator), 1); assert!(G1_GENERATOR_SERIALIZED_COMP == serialize(&generator), 1); - let generator_from_comp = std::option::extract(&mut deserialize(&G1_GENERATOR_SERIALIZED_COMP - )); - let generator_from_uncomp = std::option::extract(&mut deserialize(&G1_GENERATOR_SERIALIZED_UNCOMP - )); + let generator_from_comp = deserialize(&G1_GENERATOR_SERIALIZED_COMP + ).extract(); + let generator_from_uncomp = deserialize(&G1_GENERATOR_SERIALIZED_UNCOMP + ).extract(); assert!(eq(&generator, &generator_from_comp), 1); assert!(eq(&generator, &generator_from_uncomp), 1); // Deserialization should fail if given a byte array of correct size but the value is not a member. - assert!(std::option::is_none(&deserialize(&x"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")), 1); + assert!( + deserialize(&x"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").is_none( + ), 1); // Deserialization should fail if given a byte array of wrong size. 
- assert!(std::option::is_none(&deserialize(&x"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")), 1); + assert!( + deserialize(&x"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").is_none( + ), 1); assert!( G1_INF_SERIALIZED_UNCOMP == serialize(&point_at_infinity), 1); assert!(G1_INF_SERIALIZED_COMP == serialize(&point_at_infinity), 1); - let inf_from_uncomp = std::option::extract(&mut deserialize(&G1_INF_SERIALIZED_UNCOMP - )); - let inf_from_comp = std::option::extract(&mut deserialize(&G1_INF_SERIALIZED_COMP - )); + let inf_from_uncomp = deserialize(&G1_INF_SERIALIZED_UNCOMP + ).extract(); + let inf_from_comp = deserialize(&G1_INF_SERIALIZED_COMP + ).extract(); assert!(eq(&point_at_infinity, &inf_from_comp), 1); assert!(eq(&point_at_infinity, &inf_from_uncomp), 1); - let point_7g_from_uncomp = std::option::extract(&mut deserialize(&G1_GENERATOR_MUL_BY_7_SERIALIZED_UNCOMP - )); - let point_7g_from_comp = std::option::extract(&mut deserialize(&G1_GENERATOR_MUL_BY_7_SERIALIZED_COMP - )); + let point_7g_from_uncomp = deserialize(&G1_GENERATOR_MUL_BY_7_SERIALIZED_UNCOMP + ).extract(); + let point_7g_from_comp = deserialize(&G1_GENERATOR_MUL_BY_7_SERIALIZED_COMP + ).extract(); assert!(eq(&point_7g_from_comp, &point_7g_from_uncomp), 1); // Deserialization should fail if given a point on the curve but off its prime-order 
subgroup, e.g., `(0,2)`. - assert!(std::option::is_none(&deserialize(&x"000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002")), 1); - assert!(std::option::is_none(&deserialize(&x"800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")), 1); + assert!( + deserialize(&x"000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002").is_none( + ), 1); + assert!( + deserialize(&x"800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000").is_none( + ), 1); // Deserialization should fail if given a valid point in (Fq,Fq) but not on the curve. - assert!(std::option::is_none(&deserialize(&x"8959e137e0719bf872abb08411010f437a8955bd42f5ba20fca64361af58ce188b1adb96ef229698bb7860b79e24ba12000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")), 1); + assert!( + deserialize(&x"8959e137e0719bf872abb08411010f437a8955bd42f5ba20fca64361af58ce188b1adb96ef229698bb7860b79e24ba12000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000").is_none( + ), 1); // Deserialization should fail if given an invalid point (x not in Fq). - assert!(std::option::is_none(&deserialize(&x"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffa76e9853b35f5c9b2002d9e5833fd8f9ab4cd3934a4722a06f6055bfca720c91629811e2ecae7f0cf301b6d07898a90f")), 1); - assert!(std::option::is_none(&deserialize(&x"9fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")), 1); + assert!( + deserialize(&x"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffa76e9853b35f5c9b2002d9e5833fd8f9ab4cd3934a4722a06f6055bfca720c91629811e2ecae7f0cf301b6d07898a90f").is_none( + ), 1); + assert!( + deserialize(&x"9fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").is_none( + ), 1); // Deserialization should fail if given a byte array of wrong size. - assert!(std::option::is_none(&deserialize(&x"000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000ab")), 1); - assert!(std::option::is_none(&deserialize(&x"000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000ab")), 1); + assert!( + deserialize(&x"000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000ab").is_none( + ), 1); + assert!( + deserialize(&x"000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000ab").is_none( + ), 1); // Scalar multiplication. 
let scalar_7 = from_u64(7); @@ -402,16 +420,16 @@ module aptos_std::bls12381_algebra { let expected = zero(); let i = 0; while (i < num_entries) { - let element = std::vector::borrow(&elements, i); - let scalar = std::vector::borrow(&scalars, i); + let element = elements.borrow(i); + let scalar = scalars.borrow(i); expected = add(&expected, &scalar_mul(element, scalar)); - i = i + 1; + i += 1; }; let actual = multi_scalar_mul(&elements, &scalars); assert!(eq(&expected, &actual), 1); - num_entries = num_entries + 1; + num_entries += 1; }; // Doubling. @@ -438,10 +456,12 @@ module aptos_std::bls12381_algebra { // Hash-to-group using suite `BLS12381G1_XMD:SHA-256_SSWU_RO_`. // Test vectors source: https://www.ietf.org/archive/id/draft-irtf-cfrg-hash-to-curve-16.html#name-bls12381g1_xmdsha-256_sswu_ let actual = hash_to(&b"QUUX-V01-CS02-with-BLS12381G1_XMD:SHA-256_SSWU_RO_", &b""); - let expected = std::option::extract(&mut deserialize(&x"052926add2207b76ca4fa57a8734416c8dc95e24501772c814278700eed6d1e4e8cf62d9c09db0fac349612b759e79a108ba738453bfed09cb546dbb0783dbb3a5f1f566ed67bb6be0e8c67e2e81a4cc68ee29813bb7994998f3eae0c9c6a265")); + let expected = deserialize(&x"052926add2207b76ca4fa57a8734416c8dc95e24501772c814278700eed6d1e4e8cf62d9c09db0fac349612b759e79a108ba738453bfed09cb546dbb0783dbb3a5f1f566ed67bb6be0e8c67e2e81a4cc68ee29813bb7994998f3eae0c9c6a265").extract( + ); assert!(eq(&expected, &actual), 1); let actual = hash_to(&b"QUUX-V01-CS02-with-BLS12381G1_XMD:SHA-256_SSWU_RO_", &b"abcdef0123456789"); - let expected = std::option::extract(&mut deserialize(&x"11e0b079dea29a68f0383ee94fed1b940995272407e3bb916bbf268c263ddd57a6a27200a784cbc248e84f357ce82d9803a87ae2caf14e8ee52e51fa2ed8eefe80f02457004ba4d486d6aa1f517c0889501dc7413753f9599b099ebcbbd2d709")); + let expected = deserialize(&x"11e0b079dea29a68f0383ee94fed1b940995272407e3bb916bbf268c263ddd57a6a27200a784cbc248e84f357ce82d9803a87ae2caf14e8ee52e51fa2ed8eefe80f02457004ba4d486d6aa1f517c0889501dc7413753f9599b099ebcbbd2d709").extract( + ); assert!(eq(&expected, &actual), 1); } @@ -474,38 +494,52 @@ module aptos_std::bls12381_algebra { // Serialization/deserialization. 
assert!(G2_GENERATOR_SERIALIZED_COMP == serialize(&generator), 1); assert!(G2_GENERATOR_SERIALIZED_UNCOMP == serialize(&generator), 1); - let generator_from_uncomp = std::option::extract(&mut deserialize(&G2_GENERATOR_SERIALIZED_UNCOMP - )); - let generator_from_comp = std::option::extract(&mut deserialize(&G2_GENERATOR_SERIALIZED_COMP - )); + let generator_from_uncomp = deserialize(&G2_GENERATOR_SERIALIZED_UNCOMP + ).extract(); + let generator_from_comp = deserialize(&G2_GENERATOR_SERIALIZED_COMP + ).extract(); assert!(eq(&generator, &generator_from_comp), 1); assert!(eq(&generator, &generator_from_uncomp), 1); assert!(G2_INF_SERIALIZED_UNCOMP == serialize(&point_at_infinity), 1); assert!(G2_INF_SERIALIZED_COMP == serialize(&point_at_infinity), 1); - let inf_from_uncomp = std::option::extract(&mut deserialize(&G2_INF_SERIALIZED_UNCOMP)); - let inf_from_comp = std::option::extract(&mut deserialize(&G2_INF_SERIALIZED_COMP)); + let inf_from_uncomp = deserialize(&G2_INF_SERIALIZED_UNCOMP).extract(); + let inf_from_comp = deserialize(&G2_INF_SERIALIZED_COMP).extract(); assert!(eq(&point_at_infinity, &inf_from_comp), 1); assert!(eq(&point_at_infinity, &inf_from_uncomp), 1); - let point_7g_from_uncomp = std::option::extract(&mut deserialize(&G2_GENERATOR_MUL_BY_7_SERIALIZED_UNCOMP - )); - let point_7g_from_comp = std::option::extract(&mut deserialize(&G2_GENERATOR_MUL_BY_7_SERIALIZED_COMP - )); + let point_7g_from_uncomp = deserialize(&G2_GENERATOR_MUL_BY_7_SERIALIZED_UNCOMP + ).extract(); + let point_7g_from_comp = deserialize(&G2_GENERATOR_MUL_BY_7_SERIALIZED_COMP + ).extract(); assert!(eq(&point_7g_from_comp, &point_7g_from_uncomp), 1); // Deserialization should fail if given a point on the curve but not in the prime-order subgroup. - assert!(std::option::is_none(&deserialize(&x"f037d4ccd5ee751eba1c1fd4c7edbb76d2b04c3a1f3f554827cf37c3acbc2dbb7cdb320a2727c2462d6c55ca1f637707b96eeebc622c1dbe7c56c34f93887c8751b42bd04f29253a82251c192ef27ece373993b663f4360505299c5bd18c890ddd862a6308796bf47e2265073c1f7d81afd69f9497fc1403e2e97a866129b43b672295229c21116d4a99f3e5c2ae720a31f181dbed8a93e15f909c20cf69d11a8879adbbe6890740def19814e6d4ed23fb0dcbd79291655caf48b466ac9cae04")), 1); - assert!(std::option::is_none(&deserialize(&x"f037d4ccd5ee751eba1c1fd4c7edbb76d2b04c3a1f3f554827cf37c3acbc2dbb7cdb320a2727c2462d6c55ca1f637707b96eeebc622c1dbe7c56c34f93887c8751b42bd04f29253a82251c192ef27ece373993b663f4360505299c5bd18c890d")), 1); + assert!( + deserialize(&x"f037d4ccd5ee751eba1c1fd4c7edbb76d2b04c3a1f3f554827cf37c3acbc2dbb7cdb320a2727c2462d6c55ca1f637707b96eeebc622c1dbe7c56c34f93887c8751b42bd04f29253a82251c192ef27ece373993b663f4360505299c5bd18c890ddd862a6308796bf47e2265073c1f7d81afd69f9497fc1403e2e97a866129b43b672295229c21116d4a99f3e5c2ae720a31f181dbed8a93e15f909c20cf69d11a8879adbbe6890740def19814e6d4ed23fb0dcbd79291655caf48b466ac9cae04").is_none( + ), 1); + assert!( + deserialize(&x"f037d4ccd5ee751eba1c1fd4c7edbb76d2b04c3a1f3f554827cf37c3acbc2dbb7cdb320a2727c2462d6c55ca1f637707b96eeebc622c1dbe7c56c34f93887c8751b42bd04f29253a82251c192ef27ece373993b663f4360505299c5bd18c890d").is_none( + ), 1); // Deserialization should fail if given a valid point in (Fq2,Fq2) but not on the curve. 
- assert!(std::option::is_none(&deserialize(&x"f037d4ccd5ee751eba1c1fd4c7edbb76d2b04c3a1f3f554827cf37c3acbc2dbb7cdb320a2727c2462d6c55ca1f637707b96eeebc622c1dbe7c56c34f93887c8751b42bd04f29253a82251c192ef27ece373993b663f4360505299c5bd18c890d000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")), 1); + assert!( + deserialize(&x"f037d4ccd5ee751eba1c1fd4c7edbb76d2b04c3a1f3f554827cf37c3acbc2dbb7cdb320a2727c2462d6c55ca1f637707b96eeebc622c1dbe7c56c34f93887c8751b42bd04f29253a82251c192ef27ece373993b663f4360505299c5bd18c890d000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000").is_none( + ), 1); // Deserialization should fail if given an invalid point (x not in Fq2). - assert!(std::option::is_none(&deserialize(&x"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffdd862a6308796bf47e2265073c1f7d81afd69f9497fc1403e2e97a866129b43b672295229c21116d4a99f3e5c2ae720a31f181dbed8a93e15f909c20cf69d11a8879adbbe6890740def19814e6d4ed23fb0dcbd79291655caf48b466ac9cae04")), 1); - assert!(std::option::is_none(&deserialize(&x"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")), 1); + assert!( + deserialize(&x"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffdd862a6308796bf47e2265073c1f7d81afd69f9497fc1403e2e97a866129b43b672295229c21116d4a99f3e5c2ae720a31f181dbed8a93e15f909c20cf69d11a8879adbbe6890740def19814e6d4ed23fb0dcbd79291655caf48b466ac9cae04").is_none( + ), 1); + assert!( + deserialize(&x"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").is_none( + ), 1); // Deserialization should fail if given a byte array of wrong size. 
- assert!(std::option::is_none(&deserialize(&x"000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000ab")), 1); - assert!(std::option::is_none(&deserialize(&x"000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000ab")), 1); + assert!( + deserialize(&x"000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000ab").is_none( + ), 1); + assert!( + deserialize(&x"000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000ab").is_none( + ), 1); // Scalar multiplication. let scalar_7 = from_u64(7); @@ -523,16 +557,16 @@ module aptos_std::bls12381_algebra { let expected = zero(); let i = 0; while (i < num_entries) { - let element = std::vector::borrow(&elements, i); - let scalar = std::vector::borrow(&scalars, i); + let element = elements.borrow(i); + let scalar = scalars.borrow(i); expected = add(&expected, &scalar_mul(element, scalar)); - i = i + 1; + i += 1; }; let actual = multi_scalar_mul(&elements, &scalars); assert!(eq(&expected, &actual), 1); - num_entries = num_entries + 1; + num_entries += 1; }; // Doubling. @@ -559,10 +593,12 @@ module aptos_std::bls12381_algebra { // Hash-to-group using suite `BLS12381G2_XMD:SHA-256_SSWU_RO_`. 
// Test vectors source: https://www.ietf.org/archive/id/draft-irtf-cfrg-hash-to-curve-16.html#name-bls12381g2_xmdsha-256_sswu_ let actual = hash_to(&b"QUUX-V01-CS02-with-BLS12381G2_XMD:SHA-256_SSWU_RO_", &b""); - let expected = std::option::extract(&mut deserialize(&x"05cb8437535e20ecffaef7752baddf98034139c38452458baeefab379ba13dff5bf5dd71b72418717047f5b0f37da03d0141ebfbdca40eb85b87142e130ab689c673cf60f1a3e98d69335266f30d9b8d4ac44c1038e9dcdd5393faf5c41fb78a12424ac32561493f3fe3c260708a12b7c620e7be00099a974e259ddc7d1f6395c3c811cdd19f1e8dbf3e9ecfdcbab8d60503921d7f6a12805e72940b963c0cf3471c7b2a524950ca195d11062ee75ec076daf2d4bc358c4b190c0c98064fdd92")); + let expected = deserialize(&x"05cb8437535e20ecffaef7752baddf98034139c38452458baeefab379ba13dff5bf5dd71b72418717047f5b0f37da03d0141ebfbdca40eb85b87142e130ab689c673cf60f1a3e98d69335266f30d9b8d4ac44c1038e9dcdd5393faf5c41fb78a12424ac32561493f3fe3c260708a12b7c620e7be00099a974e259ddc7d1f6395c3c811cdd19f1e8dbf3e9ecfdcbab8d60503921d7f6a12805e72940b963c0cf3471c7b2a524950ca195d11062ee75ec076daf2d4bc358c4b190c0c98064fdd92").extract( + ); assert!(eq(&expected, &actual), 1); let actual = hash_to(&b"QUUX-V01-CS02-with-BLS12381G2_XMD:SHA-256_SSWU_RO_", &b"abcdef0123456789"); - let expected = std::option::extract(&mut deserialize(&x"190d119345b94fbd15497bcba94ecf7db2cbfd1e1fe7da034d26cbba169fb3968288b3fafb265f9ebd380512a71c3f2c121982811d2491fde9ba7ed31ef9ca474f0e1501297f68c298e9f4c0028add35aea8bb83d53c08cfc007c1e005723cd00bb5e7572275c567462d91807de765611490205a941a5a6af3b1691bfe596c31225d3aabdf15faff860cb4ef17c7c3be05571a0f8d3c08d094576981f4a3b8eda0a8e771fcdcc8ecceaf1356a6acf17574518acb506e435b639353c2e14827c8")); + let expected = deserialize(&x"190d119345b94fbd15497bcba94ecf7db2cbfd1e1fe7da034d26cbba169fb3968288b3fafb265f9ebd380512a71c3f2c121982811d2491fde9ba7ed31ef9ca474f0e1501297f68c298e9f4c0028add35aea8bb83d53c08cfc007c1e005723cd00bb5e7572275c567462d91807de765611490205a941a5a6af3b1691bfe596c31225d3aabdf15faff860cb4ef17c7c3be05571a0f8d3c08d094576981f4a3b8eda0a8e771fcdcc8ecceaf1356a6acf17574518acb506e435b639353c2e14827c8").extract( + ); assert!(eq(&expected, &actual), 1); } @@ -586,20 +622,24 @@ module aptos_std::bls12381_algebra { // Serialization/deserialization. assert!(GT_GENERATOR_SERIALIZED == serialize(&generator), 1); - let generator_from_deser = std::option::extract(&mut deserialize(>_GENERATOR_SERIALIZED)); + let generator_from_deser = deserialize(>_GENERATOR_SERIALIZED).extract(); assert!(eq(&generator, &generator_from_deser), 1); assert!(FQ12_ONE_SERIALIZED == serialize(&identity), 1); - let identity_from_deser = std::option::extract(&mut deserialize(&FQ12_ONE_SERIALIZED)); + let identity_from_deser = deserialize(&FQ12_ONE_SERIALIZED).extract(); assert!(eq(&identity, &identity_from_deser), 1); - let element_7g_from_deser = std::option::extract(&mut deserialize(>_GENERATOR_MUL_BY_7_SERIALIZED - )); - assert!(std::option::is_none(&deserialize(&x"ffff")), 1); + let element_7g_from_deser = deserialize(>_GENERATOR_MUL_BY_7_SERIALIZED + ).extract(); + assert!(deserialize(&x"ffff").is_none(), 1); // Deserialization should fail if given an element in Fq12 but not in the prime-order subgroup. 
- assert!(std::option::is_none(&deserialize(&x"000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")), 1); + assert!( + deserialize(&x"000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000").is_none( + ), 1); // Deserialization should fail if given a byte array of wrong size. 
- assert!(std::option::is_none(&deserialize(&x"000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000ab")), 1); + assert!( + deserialize(&x"000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000ab").is_none( + ), 1); // Element scalar multiplication. let scalar_7 = from_u64(7); @@ -653,22 +693,30 @@ module aptos_std::bls12381_algebra { assert!(FR_VAL_0_SERIALIZED_LSB == serialize(&val_0), 1); assert!(FR_VAL_1_SERIALIZED_LSB == serialize(&val_1), 1); let val_7 = from_u64(7); - let val_7_2nd = std::option::extract(&mut deserialize(&FR_VAL_7_SERIALIZED_LSB)); - let val_7_3rd = std::option::extract(&mut deserialize(&FR_VAL_7_SERIALIZED_MSB)); + let val_7_2nd = deserialize(&FR_VAL_7_SERIALIZED_LSB).extract(); + let val_7_3rd = deserialize(&FR_VAL_7_SERIALIZED_MSB).extract(); assert!(eq(&val_7, &val_7_2nd), 1); assert!(eq(&val_7, &val_7_3rd), 1); assert!(FR_VAL_7_SERIALIZED_LSB == serialize(&val_7), 1); assert!(FR_VAL_7_SERIALIZED_MSB == serialize(&val_7), 1); // Deserialization should fail if given a byte array of right size but the value is not a member. 
- assert!(std::option::is_none(&deserialize(&x"01000000fffffffffe5bfeff02a4bd5305d8a10908d83933487d9d2953a7ed73")), 1); - assert!(std::option::is_none(&deserialize(&x"73eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff00000001")), 1); + assert!( + deserialize(&x"01000000fffffffffe5bfeff02a4bd5305d8a10908d83933487d9d2953a7ed73").is_none( + ), 1); + assert!( + deserialize(&x"73eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff00000001").is_none( + ), 1); // Deserialization should fail if given a byte array of wrong size. - assert!(std::option::is_none(&deserialize(&x"01000000fffffffffe5bfeff02a4bd5305d8a10908d83933487d9d2953a7ed7300")), 1); - assert!(std::option::is_none(&deserialize(&x"0073eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff00000001")), 1); - assert!(std::option::is_none(&deserialize(&x"ffff")), 1); - assert!(std::option::is_none(&deserialize(&x"ffff")), 1); + assert!( + deserialize(&x"01000000fffffffffe5bfeff02a4bd5305d8a10908d83933487d9d2953a7ed7300").is_none( + ), 1); + assert!( + deserialize(&x"0073eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff00000001").is_none( + ), 1); + assert!(deserialize(&x"ffff").is_none(), 1); + assert!(deserialize(&x"ffff").is_none(), 1); // Negation. let val_minus_7 = neg(&val_7); @@ -688,12 +736,12 @@ module aptos_std::bls12381_algebra { // division. let val_0 = from_u64(0); - assert!(eq(&val_7, &std::option::extract(&mut div(&val_63, &val_9))), 1); - assert!(std::option::is_none(&div(&val_63, &val_0)), 1); + assert!(eq(&val_7, &div(&val_63, &val_9).extract()), 1); + assert!(div(&val_63, &val_0).is_none(), 1); // Inversion. assert!(eq(&val_minus_7, &neg(&val_7)), 1); - assert!(std::option::is_none(&inv(&val_0)), 1); + assert!(inv(&val_0).is_none(), 1); // Squaring. let val_x = rand_insecure(); @@ -781,7 +829,7 @@ module aptos_std::bls12381_algebra { let remaining = G1_NUM_MAX; while (remaining > 0) { zero(); - remaining = remaining - 1; + remaining -= 1; } } @@ -792,7 +840,7 @@ module aptos_std::bls12381_algebra { let remaining = G1_NUM_MAX + 1; while (remaining > 0) { zero(); - remaining = remaining - 1; + remaining -= 1; } } diff --git a/aptos-move/framework/aptos-stdlib/sources/cryptography/bn254_algebra.move b/aptos-move/framework/aptos-stdlib/sources/cryptography/bn254_algebra.move index a5cff4df7b289..053a355e57a14 100644 --- a/aptos-move/framework/aptos-stdlib/sources/cryptography/bn254_algebra.move +++ b/aptos-move/framework/aptos-stdlib/sources/cryptography/bn254_algebra.move @@ -237,8 +237,8 @@ module std::bn254_algebra { fun rand_vector(num: u64): vector> { let elements = vector[]; while (num > 0) { - std::vector::push_back(&mut elements, rand_insecure()); - num = num - 1; + elements.push_back(rand_insecure()); + num -= 1; }; elements } @@ -269,10 +269,10 @@ module std::bn254_algebra { assert!(FQ12_VAL_0_SERIALIZED == serialize(&val_0), 1); assert!(FQ12_VAL_1_SERIALIZED == serialize(&val_1), 1); let val_7 = from_u64(7); - let val_7_another = std::option::extract(&mut deserialize(&FQ12_VAL_7_SERIALIZED)); + let val_7_another = deserialize(&FQ12_VAL_7_SERIALIZED).extract(); assert!(eq(&val_7, &val_7_another), 1); assert!(FQ12_VAL_7_SERIALIZED == serialize(&val_7), 1); - assert!(std::option::is_none(&deserialize(&x"ffff")), 1); + assert!(deserialize(&x"ffff").is_none(), 1); // Negation. let val_minus_7 = neg(&val_7); @@ -292,19 +292,19 @@ module std::bn254_algebra { // division. 
let val_0 = from_u64(0); - assert!(eq(&val_7, &std::option::extract(&mut div(&val_63, &val_9))), 1); - assert!(std::option::is_none(&div(&val_63, &val_0)), 1); + assert!(eq(&val_7, &div(&val_63, &val_9).extract()), 1); + assert!(div(&val_63, &val_0).is_none(), 1); // Inversion. assert!(eq(&val_minus_7, &neg(&val_7)), 1); - assert!(std::option::is_none(&inv(&val_0)), 1); + assert!(inv(&val_0).is_none(), 1); // Squaring. let val_x = rand_insecure(); assert!(eq(&mul(&val_x, &val_x), &sqr(&val_x)), 1); // Downcasting. - assert!(eq(&zero(), &std::option::extract(&mut downcast(&val_1))), 1); + assert!(eq(&zero(), &downcast(&val_1).extract()), 1); // upcasting assert!(eq(&val_1, &upcast(&zero())), 1); } @@ -340,47 +340,65 @@ module std::bn254_algebra { // Serialization/deserialization. assert!(G1_GENERATOR_SERIALIZED_UNCOMP == serialize(&generator), 1); assert!(G1_GENERATOR_SERIALIZED_COMP == serialize(&generator), 1); - let generator_from_comp = std::option::extract(&mut deserialize(&G1_GENERATOR_SERIALIZED_COMP)); - let generator_from_uncomp = std::option::extract(&mut deserialize(&G1_GENERATOR_SERIALIZED_UNCOMP)); + let generator_from_comp = deserialize(&G1_GENERATOR_SERIALIZED_COMP).extract(); + let generator_from_uncomp = deserialize(&G1_GENERATOR_SERIALIZED_UNCOMP).extract(); assert!(eq(&generator, &generator_from_comp), 1); assert!(eq(&generator, &generator_from_uncomp), 1); // Deserialization should fail if given a byte array of correct size but the value is not a member. - assert!(std::option::is_none(&deserialize(&x"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")), 1); + assert!( + 
deserialize(&x"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").is_none( + ), 1); // Deserialization should fail if given a byte array of wrong size. - assert!(std::option::is_none(&deserialize(&x"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")), 1); + assert!( + 
deserialize(&x"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").is_none( + ), 1); assert!( G1_INF_SERIALIZED_UNCOMP == serialize(&point_at_infinity), 1); assert!(G1_INF_SERIALIZED_COMP == serialize(&point_at_infinity), 1); - let inf_from_uncomp = std::option::extract(&mut deserialize(&G1_INF_SERIALIZED_UNCOMP - )); - let inf_from_comp = std::option::extract(&mut deserialize(&G1_INF_SERIALIZED_COMP - )); + let inf_from_uncomp = deserialize(&G1_INF_SERIALIZED_UNCOMP + ).extract(); + let inf_from_comp = deserialize(&G1_INF_SERIALIZED_COMP + ).extract(); assert!(eq(&point_at_infinity, &inf_from_comp), 1); assert!(eq(&point_at_infinity, &inf_from_uncomp), 1); - let point_7g_from_uncomp = std::option::extract(&mut deserialize(&G1_GENERATOR_MUL_BY_7_SERIALIZED_UNCOMP - )); - let point_7g_from_comp = std::option::extract(&mut deserialize(&G1_GENERATOR_MUL_BY_7_SERIALIZED_COMP - )); + let point_7g_from_uncomp = deserialize(&G1_GENERATOR_MUL_BY_7_SERIALIZED_UNCOMP + ).extract(); + let point_7g_from_comp = deserialize(&G1_GENERATOR_MUL_BY_7_SERIALIZED_COMP + ).extract(); assert!(eq(&point_7g_from_comp, &point_7g_from_uncomp), 1); // Deserialization should fail if given a point on the curve but off its prime-order subgroup, e.g., `(0,2)`. - assert!(std::option::is_none(&deserialize(&x"000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002")), 1); - assert!(std::option::is_none(&deserialize(&x"800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")), 1); + assert!( + deserialize(&x"000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002").is_none( + ), 1); + assert!( + deserialize(&x"800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000").is_none( + ), 1); // Deserialization should fail if given a valid point in (Fq,Fq) but not on the curve. 
- assert!(std::option::is_none(&deserialize(&x"8959e137e0719bf872abb08411010f437a8955bd42f5ba20fca64361af58ce188b1adb96ef229698bb7860b79e24ba12000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")), 1); + assert!( + deserialize(&x"8959e137e0719bf872abb08411010f437a8955bd42f5ba20fca64361af58ce188b1adb96ef229698bb7860b79e24ba12000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000").is_none( + ), 1); // Deserialization should fail if given an invalid point (x not in Fq). - assert!(std::option::is_none(&deserialize(&x"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffa76e9853b35f5c9b2002d9e5833fd8f9ab4cd3934a4722a06f6055bfca720c91629811e2ecae7f0cf301b6d07898a90f")), 1); - assert!(std::option::is_none(&deserialize(&x"9fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")), 1); + assert!( + deserialize(&x"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffa76e9853b35f5c9b2002d9e5833fd8f9ab4cd3934a4722a06f6055bfca720c91629811e2ecae7f0cf301b6d07898a90f").is_none( + ), 1); + assert!( + deserialize(&x"9fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").is_none( + ), 1); // Deserialization should fail if given a byte array of wrong size. - assert!(std::option::is_none(&deserialize(&x"000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000ab")), 1); - assert!(std::option::is_none(&deserialize(&x"000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000ab")), 1); + assert!( + deserialize(&x"000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000ab").is_none( + ), 1); + assert!( + deserialize(&x"000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000ab").is_none( + ), 1); // Scalar multiplication. let scalar_7 = from_u64(7); @@ -398,16 +416,16 @@ module std::bn254_algebra { let expected = zero(); let i = 0; while (i < num_entries) { - let element = std::vector::borrow(&elements, i); - let scalar = std::vector::borrow(&scalars, i); + let element = elements.borrow(i); + let scalar = scalars.borrow(i); expected = add(&expected, &scalar_mul(element, scalar)); - i = i + 1; + i += 1; }; let actual = multi_scalar_mul(&elements, &scalars); assert!(eq(&expected, &actual), 1); - num_entries = num_entries + 1; + num_entries += 1; }; // Doubling. @@ -461,38 +479,52 @@ module std::bn254_algebra { // Serialization/deserialization. 
assert!(G2_GENERATOR_SERIALIZED_COMP == serialize(&generator), 1); assert!(G2_GENERATOR_SERIALIZED_UNCOMP == serialize(&generator), 1); - let generator_from_uncomp = std::option::extract(&mut deserialize(&G2_GENERATOR_SERIALIZED_UNCOMP - )); - let generator_from_comp = std::option::extract(&mut deserialize(&G2_GENERATOR_SERIALIZED_COMP - )); + let generator_from_uncomp = deserialize(&G2_GENERATOR_SERIALIZED_UNCOMP + ).extract(); + let generator_from_comp = deserialize(&G2_GENERATOR_SERIALIZED_COMP + ).extract(); assert!(eq(&generator, &generator_from_comp), 1); assert!(eq(&generator, &generator_from_uncomp), 1); assert!(G2_INF_SERIALIZED_UNCOMP == serialize(&point_at_infinity), 1); assert!(G2_INF_SERIALIZED_COMP == serialize(&point_at_infinity), 1); - let inf_from_uncomp = std::option::extract(&mut deserialize(&G2_INF_SERIALIZED_UNCOMP)); - let inf_from_comp = std::option::extract(&mut deserialize(&G2_INF_SERIALIZED_COMP)); + let inf_from_uncomp = deserialize(&G2_INF_SERIALIZED_UNCOMP).extract(); + let inf_from_comp = deserialize(&G2_INF_SERIALIZED_COMP).extract(); assert!(eq(&point_at_infinity, &inf_from_comp), 1); assert!(eq(&point_at_infinity, &inf_from_uncomp), 1); - let point_7g_from_uncomp = std::option::extract(&mut deserialize(&G2_GENERATOR_MUL_BY_7_SERIALIZED_UNCOMP - )); - let point_7g_from_comp = std::option::extract(&mut deserialize(&G2_GENERATOR_MUL_BY_7_SERIALIZED_COMP - )); + let point_7g_from_uncomp = deserialize(&G2_GENERATOR_MUL_BY_7_SERIALIZED_UNCOMP + ).extract(); + let point_7g_from_comp = deserialize(&G2_GENERATOR_MUL_BY_7_SERIALIZED_COMP + ).extract(); assert!(eq(&point_7g_from_comp, &point_7g_from_uncomp), 1); // Deserialization should fail if given a point on the curve but not in the prime-order subgroup. - assert!(std::option::is_none(&deserialize(&x"f037d4ccd5ee751eba1c1fd4c7edbb76d2b04c3a1f3f554827cf37c3acbc2dbb7cdb320a2727c2462d6c55ca1f637707b96eeebc622c1dbe7c56c34f93887c8751b42bd04f29253a82251c192ef27ece373993b663f4360505299c5bd18c890ddd862a6308796bf47e2265073c1f7d81afd69f9497fc1403e2e97a866129b43b672295229c21116d4a99f3e5c2ae720a31f181dbed8a93e15f909c20cf69d11a8879adbbe6890740def19814e6d4ed23fb0dcbd79291655caf48b466ac9cae04")), 1); - assert!(std::option::is_none(&deserialize(&x"f037d4ccd5ee751eba1c1fd4c7edbb76d2b04c3a1f3f554827cf37c3acbc2dbb7cdb320a2727c2462d6c55ca1f637707b96eeebc622c1dbe7c56c34f93887c8751b42bd04f29253a82251c192ef27ece373993b663f4360505299c5bd18c890d")), 1); + assert!( + deserialize(&x"f037d4ccd5ee751eba1c1fd4c7edbb76d2b04c3a1f3f554827cf37c3acbc2dbb7cdb320a2727c2462d6c55ca1f637707b96eeebc622c1dbe7c56c34f93887c8751b42bd04f29253a82251c192ef27ece373993b663f4360505299c5bd18c890ddd862a6308796bf47e2265073c1f7d81afd69f9497fc1403e2e97a866129b43b672295229c21116d4a99f3e5c2ae720a31f181dbed8a93e15f909c20cf69d11a8879adbbe6890740def19814e6d4ed23fb0dcbd79291655caf48b466ac9cae04").is_none( + ), 1); + assert!( + deserialize(&x"f037d4ccd5ee751eba1c1fd4c7edbb76d2b04c3a1f3f554827cf37c3acbc2dbb7cdb320a2727c2462d6c55ca1f637707b96eeebc622c1dbe7c56c34f93887c8751b42bd04f29253a82251c192ef27ece373993b663f4360505299c5bd18c890d").is_none( + ), 1); // Deserialization should fail if given a valid point in (Fq2,Fq2) but not on the curve. 
- assert!(std::option::is_none(&deserialize(&x"f037d4ccd5ee751eba1c1fd4c7edbb76d2b04c3a1f3f554827cf37c3acbc2dbb7cdb320a2727c2462d6c55ca1f637707b96eeebc622c1dbe7c56c34f93887c8751b42bd04f29253a82251c192ef27ece373993b663f4360505299c5bd18c890d000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")), 1); + assert!( + deserialize(&x"f037d4ccd5ee751eba1c1fd4c7edbb76d2b04c3a1f3f554827cf37c3acbc2dbb7cdb320a2727c2462d6c55ca1f637707b96eeebc622c1dbe7c56c34f93887c8751b42bd04f29253a82251c192ef27ece373993b663f4360505299c5bd18c890d000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000").is_none( + ), 1); // Deserialization should fail if given an invalid point (x not in Fq2). - assert!(std::option::is_none(&deserialize(&x"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffdd862a6308796bf47e2265073c1f7d81afd69f9497fc1403e2e97a866129b43b672295229c21116d4a99f3e5c2ae720a31f181dbed8a93e15f909c20cf69d11a8879adbbe6890740def19814e6d4ed23fb0dcbd79291655caf48b466ac9cae04")), 1); - assert!(std::option::is_none(&deserialize(&x"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")), 1); + assert!( + deserialize(&x"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffdd862a6308796bf47e2265073c1f7d81afd69f9497fc1403e2e97a866129b43b672295229c21116d4a99f3e5c2ae720a31f181dbed8a93e15f909c20cf69d11a8879adbbe6890740def19814e6d4ed23fb0dcbd79291655caf48b466ac9cae04").is_none( + ), 1); + assert!( + deserialize(&x"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").is_none( + ), 1); // Deserialization should fail if given a byte array of wrong size. 
- assert!(std::option::is_none(&deserialize(&x"000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000ab")), 1); - assert!(std::option::is_none(&deserialize(&x"000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000ab")), 1); + assert!( + deserialize(&x"000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000ab").is_none( + ), 1); + assert!( + deserialize(&x"000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000ab").is_none( + ), 1); // Scalar multiplication. let scalar_7 = from_u64(7); @@ -510,16 +542,16 @@ module std::bn254_algebra { let expected = zero(); let i = 0; while (i < num_entries) { - let element = std::vector::borrow(&elements, i); - let scalar = std::vector::borrow(&scalars, i); + let element = elements.borrow(i); + let scalar = scalars.borrow(i); expected = add(&expected, &scalar_mul(element, scalar)); - i = i + 1; + i += 1; }; let actual = multi_scalar_mul(&elements, &scalars); assert!(eq(&expected, &actual), 1); - num_entries = num_entries + 1; + num_entries += 1; }; // Doubling. @@ -565,20 +597,24 @@ module std::bn254_algebra { // Serialization/deserialization. assert!(GT_GENERATOR_SERIALIZED == serialize(&generator), 1); - let generator_from_deser = std::option::extract(&mut deserialize(>_GENERATOR_SERIALIZED)); + let generator_from_deser = deserialize(>_GENERATOR_SERIALIZED).extract(); assert!(eq(&generator, &generator_from_deser), 1); assert!(FQ12_ONE_SERIALIZED == serialize(&identity), 1); - let identity_from_deser = std::option::extract(&mut deserialize(&FQ12_ONE_SERIALIZED)); + let identity_from_deser = deserialize(&FQ12_ONE_SERIALIZED).extract(); assert!(eq(&identity, &identity_from_deser), 1); - let element_7g_from_deser = std::option::extract(&mut deserialize(>_GENERATOR_MUL_BY_7_SERIALIZED - )); - assert!(std::option::is_none(&deserialize(&x"ffff")), 1); + let element_7g_from_deser = deserialize(>_GENERATOR_MUL_BY_7_SERIALIZED + ).extract(); + assert!(deserialize(&x"ffff").is_none(), 1); // Deserialization should fail if given an element in Fq12 but not in the prime-order subgroup. 
- assert!(std::option::is_none(&deserialize(&x"000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")), 1); + assert!( + deserialize(&x"000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000").is_none( + ), 1); // Deserialization should fail if given a byte array of wrong size. 
- assert!(std::option::is_none(&deserialize(&x"000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000ab")), 1); + assert!( + deserialize(&x"000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000ab").is_none( + ), 1); // Element scalar multiplication. let scalar_7 = from_u64(7); @@ -632,22 +668,30 @@ module std::bn254_algebra { assert!(FR_VAL_0_SERIALIZED_LSB == serialize(&val_0), 1); assert!(FR_VAL_1_SERIALIZED_LSB == serialize(&val_1), 1); let val_7 = from_u64(7); - let val_7_2nd = std::option::extract(&mut deserialize(&FR_VAL_7_SERIALIZED_LSB)); - let val_7_3rd = std::option::extract(&mut deserialize(&FR_VAL_7_SERIALIZED_MSB)); + let val_7_2nd = deserialize(&FR_VAL_7_SERIALIZED_LSB).extract(); + let val_7_3rd = deserialize(&FR_VAL_7_SERIALIZED_MSB).extract(); assert!(eq(&val_7, &val_7_2nd), 1); assert!(eq(&val_7, &val_7_3rd), 1); assert!(FR_VAL_7_SERIALIZED_LSB == serialize(&val_7), 1); assert!(FR_VAL_7_SERIALIZED_MSB == serialize(&val_7), 1); // Deserialization should fail if given a byte array of right size but the value is not a member. 
- assert!(std::option::is_none(&deserialize(&x"01000000fffffffffe5bfeff02a4bd5305d8a10908d83933487d9d2953a7ed73")), 1); - assert!(std::option::is_none(&deserialize(&x"73eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff00000001")), 1); + assert!( + deserialize(&x"01000000fffffffffe5bfeff02a4bd5305d8a10908d83933487d9d2953a7ed73").is_none( + ), 1); + assert!( + deserialize(&x"73eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff00000001").is_none( + ), 1); // Deserialization should fail if given a byte array of wrong size. - assert!(std::option::is_none(&deserialize(&x"01000000fffffffffe5bfeff02a4bd5305d8a10908d83933487d9d2953a7ed7300")), 1); - assert!(std::option::is_none(&deserialize(&x"0073eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff00000001")), 1); - assert!(std::option::is_none(&deserialize(&x"ffff")), 1); - assert!(std::option::is_none(&deserialize(&x"ffff")), 1); + assert!( + deserialize(&x"01000000fffffffffe5bfeff02a4bd5305d8a10908d83933487d9d2953a7ed7300").is_none( + ), 1); + assert!( + deserialize(&x"0073eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff00000001").is_none( + ), 1); + assert!(deserialize(&x"ffff").is_none(), 1); + assert!(deserialize(&x"ffff").is_none(), 1); // Negation. let val_minus_7 = neg(&val_7); @@ -667,12 +711,12 @@ module std::bn254_algebra { // division. let val_0 = from_u64(0); - assert!(eq(&val_7, &std::option::extract(&mut div(&val_63, &val_9))), 1); - assert!(std::option::is_none(&div(&val_63, &val_0)), 1); + assert!(eq(&val_7, &div(&val_63, &val_9).extract()), 1); + assert!(div(&val_63, &val_0).is_none(), 1); // Inversion. assert!(eq(&val_minus_7, &neg(&val_7)), 1); - assert!(std::option::is_none(&inv(&val_0)), 1); + assert!(inv(&val_0).is_none(), 1); // Squaring. let val_x = rand_insecure(); @@ -705,22 +749,30 @@ module std::bn254_algebra { assert!(FQ_VAL_0_SERIALIZED_LSB == serialize(&val_0), 1); assert!(FQ_VAL_1_SERIALIZED_LSB == serialize(&val_1), 1); let val_7 = from_u64(7); - let val_7_2nd = std::option::extract(&mut deserialize(&FQ_VAL_7_SERIALIZED_LSB)); - let val_7_3rd = std::option::extract(&mut deserialize(&FQ_VAL_7_SERIALIZED_MSB)); + let val_7_2nd = deserialize(&FQ_VAL_7_SERIALIZED_LSB).extract(); + let val_7_3rd = deserialize(&FQ_VAL_7_SERIALIZED_MSB).extract(); assert!(eq(&val_7, &val_7_2nd), 1); assert!(eq(&val_7, &val_7_3rd), 1); assert!(FQ_VAL_7_SERIALIZED_LSB == serialize(&val_7), 1); assert!(FQ_VAL_7_SERIALIZED_MSB == serialize(&val_7), 1); // Deserialization should fail if given a byte array of right size but the value is not a member. - assert!(std::option::is_none(&deserialize(&x"47fd7cd8168c203c8dca7168916a81975d588181b64550b829a031e1724e6430")), 1); - assert!(std::option::is_none(&deserialize(&x"30644e72e131a029b85045b68181585d97816a916871ca8d3c208c16d87cfd47")), 1); + assert!( + deserialize(&x"47fd7cd8168c203c8dca7168916a81975d588181b64550b829a031e1724e6430").is_none( + ), 1); + assert!( + deserialize(&x"30644e72e131a029b85045b68181585d97816a916871ca8d3c208c16d87cfd47").is_none( + ), 1); // Deserialization should fail if given a byte array of wrong size. 
- assert!(std::option::is_none(&deserialize(&x"46fd7cd8168c203c8dca7168916a81975d588181b64550b829a031e1724e643000")), 1); - assert!(std::option::is_none(&deserialize(&x"30644e72e131a029b85045b68181585d97816a916871ca8d3c208c16d87cfd4600")), 1); - assert!(std::option::is_none(&deserialize(&x"ffff")), 1); - assert!(std::option::is_none(&deserialize(&x"ffff")), 1); + assert!( + deserialize(&x"46fd7cd8168c203c8dca7168916a81975d588181b64550b829a031e1724e643000").is_none( + ), 1); + assert!( + deserialize(&x"30644e72e131a029b85045b68181585d97816a916871ca8d3c208c16d87cfd4600").is_none( + ), 1); + assert!(deserialize(&x"ffff").is_none(), 1); + assert!(deserialize(&x"ffff").is_none(), 1); // Negation. let val_minus_7 = neg(&val_7); @@ -740,12 +792,12 @@ module std::bn254_algebra { // division. let val_0 = from_u64(0); - assert!(eq(&val_7, &std::option::extract(&mut div(&val_63, &val_9))), 1); - assert!(std::option::is_none(&div(&val_63, &val_0)), 1); + assert!(eq(&val_7, &div(&val_63, &val_9).extract()), 1); + assert!(div(&val_63, &val_0).is_none(), 1); // Inversion. assert!(eq(&val_minus_7, &neg(&val_7)), 1); - assert!(std::option::is_none(&inv(&val_0)), 1); + assert!(inv(&val_0).is_none(), 1); // Squaring. let val_x = rand_insecure(); @@ -833,7 +885,7 @@ module std::bn254_algebra { let remaining = G1_NUM_MAX; while (remaining > 0) { zero(); - remaining = remaining - 1; + remaining -= 1; } } @@ -844,7 +896,7 @@ module std::bn254_algebra { let remaining = G1_NUM_MAX + 1; while (remaining > 0) { zero(); - remaining = remaining - 1; + remaining -= 1; } } diff --git a/aptos-move/framework/aptos-stdlib/sources/cryptography/crypto_algebra.move b/aptos-move/framework/aptos-stdlib/sources/cryptography/crypto_algebra.move index b31f028f83c05..3a7e2f049f00a 100644 --- a/aptos-move/framework/aptos-stdlib/sources/cryptography/crypto_algebra.move +++ b/aptos-move/framework/aptos-stdlib/sources/cryptography/crypto_algebra.move @@ -287,7 +287,7 @@ module aptos_std::crypto_algebra { } fun handles_from_elements(elements: &vector>): vector { - let num_elements = std::vector::length(elements); + let num_elements = elements.length(); let element_handles = std::vector::empty(); let i = 0; while ({ @@ -297,8 +297,8 @@ module aptos_std::crypto_algebra { }; i < num_elements }) { - std::vector::push_back(&mut element_handles, std::vector::borrow(elements, i).handle); - i = i + 1; + element_handles.push_back(elements[i].handle); + i += 1; }; element_handles } diff --git a/aptos-move/framework/aptos-stdlib/sources/cryptography/ed25519.move b/aptos-move/framework/aptos-stdlib/sources/cryptography/ed25519.move index 0f8d9c8122970..028923475ff74 100644 --- a/aptos-move/framework/aptos-stdlib/sources/cryptography/ed25519.move +++ b/aptos-move/framework/aptos-stdlib/sources/cryptography/ed25519.move @@ -71,7 +71,7 @@ module aptos_std::ed25519 { /// Parses the input 32 bytes as an *unvalidated* Ed25519 public key. public fun new_unvalidated_public_key_from_bytes(bytes: vector): UnvalidatedPublicKey { - assert!(std::vector::length(&bytes) == PUBLIC_KEY_NUM_BYTES, std::error::invalid_argument(E_WRONG_PUBKEY_SIZE)); + assert!(bytes.length() == PUBLIC_KEY_NUM_BYTES, std::error::invalid_argument(E_WRONG_PUBKEY_SIZE)); UnvalidatedPublicKey { bytes } } @@ -88,7 +88,7 @@ module aptos_std::ed25519 { /// Parses the input 64 bytes as a purported Ed25519 signature. 
public fun new_signature_from_bytes(bytes: vector): Signature { - assert!(std::vector::length(&bytes) == SIGNATURE_NUM_BYTES, std::error::invalid_argument(E_WRONG_SIGNATURE_SIZE)); + assert!(bytes.length() == SIGNATURE_NUM_BYTES, std::error::invalid_argument(E_WRONG_SIGNATURE_SIZE)); Signature { bytes } } @@ -168,7 +168,7 @@ module aptos_std::ed25519 { /// Derives the Aptos-specific authentication key of the given Ed25519 public key. fun public_key_bytes_to_authentication_key(pk_bytes: vector): vector { - std::vector::push_back(&mut pk_bytes, SIGNATURE_SCHEME_ID); + pk_bytes.push_back(SIGNATURE_SCHEME_ID); std::hash::sha3_256(pk_bytes) } diff --git a/aptos-move/framework/aptos-stdlib/sources/cryptography/federated_keyless.move b/aptos-move/framework/aptos-stdlib/sources/cryptography/federated_keyless.move new file mode 100644 index 0000000000000..5963f8abae5fc --- /dev/null +++ b/aptos-move/framework/aptos-stdlib/sources/cryptography/federated_keyless.move @@ -0,0 +1,65 @@ +/// This module implements the Federated Keyless authentication scheme. + +module aptos_std::federated_keyless { + use aptos_std::bcs_stream; + use aptos_std::keyless; + use aptos_std::error; + + #[test_only] + friend aptos_std::federated_keyless_tests; + + // + // Error codes + // + + /// There are extra bytes in the input when deserializing a Federated Keyless public key. + const E_INVALID_FEDERATED_KEYLESS_PUBLIC_KEY_EXTRA_BYTES: u64 = 1; + + // + // Constants + // + + // + // Structs + // + + /// A Federated Keyless public key, consisting of a JWK address and an underlying keyless public key. + struct PublicKey has copy, drop, store { + jwk_address: address, + keyless_public_key: keyless::PublicKey, + } + + // + // Functions + // + + /// Parses the input bytes into a Federated Keyless public key. + public fun new_public_key_from_bytes(bytes: vector): PublicKey { + let stream = bcs_stream::new(bytes); + let pk = deserialize_public_key(&mut stream); + assert!(!bcs_stream::has_remaining(&mut stream), error::invalid_argument(E_INVALID_FEDERATED_KEYLESS_PUBLIC_KEY_EXTRA_BYTES)); + pk + } + + /// Deserializes a Federated Keyless public key from a BCS stream. + public fun deserialize_public_key(stream: &mut bcs_stream::BCSStream): PublicKey { + let jwk_address = bcs_stream::deserialize_address(stream); + let keyless_public_key = keyless::deserialize_public_key(stream); + PublicKey { keyless_public_key, jwk_address } + } + + /// Creates a new Federated Keyless public key from a keyless public key and a JWK address.
+ public fun new(keyless_public_key: keyless::PublicKey, jwk_address: address): PublicKey { + PublicKey { keyless_public_key, jwk_address } + } + + /// Returns the JWK address of the public key + friend fun get_jwk_address(self: &PublicKey): address { + self.jwk_address + } + + /// Returns the keyless public key of the public key + friend fun get_keyless_public_key(self: &PublicKey): keyless::PublicKey { + self.keyless_public_key + } +} diff --git a/aptos-move/framework/aptos-stdlib/sources/cryptography/federated_keyless.spec.move b/aptos-move/framework/aptos-stdlib/sources/cryptography/federated_keyless.spec.move new file mode 100644 index 0000000000000..f780e4d98b02a --- /dev/null +++ b/aptos-move/framework/aptos-stdlib/sources/cryptography/federated_keyless.spec.move @@ -0,0 +1,5 @@ +spec aptos_std::federated_keyless { + spec module { + pragma verify = false; + } +} diff --git a/aptos-move/framework/aptos-stdlib/sources/cryptography/keyless.move b/aptos-move/framework/aptos-stdlib/sources/cryptography/keyless.move new file mode 100644 index 0000000000000..010aa425df253 --- /dev/null +++ b/aptos-move/framework/aptos-stdlib/sources/cryptography/keyless.move @@ -0,0 +1,81 @@ +/// This module implements the Keyless authentication scheme. + +module aptos_std::keyless { + use aptos_std::bcs_stream::{Self, deserialize_u8}; + use std::error; + use std::string::{Self, String}; + friend aptos_std::federated_keyless; + + #[test_only] + friend aptos_std::keyless_tests; + #[test_only] + friend aptos_std::federated_keyless_tests; + + // Error codes + // + + /// There are extra bytes in the input when deserializing a Keyless public key. + const E_INVALID_KEYLESS_PUBLIC_KEY_EXTRA_BYTES: u64 = 1; + + /// The length of the identifier commitment bytes in a Keyless public key is invalid. + const E_INVALID_ID_COMMITMENT_BYTES_LENGTH: u64 = 2; + + /// The length of the issuer string in a Keyless public key is invalid. + const E_INVALID_ISSUER_UTF8_BYTES_LENGTH: u64 = 3; + + // + // Constants + // + + /// The length of the identifier commitment bytes in a Keyless public key. + const ID_COMMITMENT_BYTES_LENGTH: u64 = 32; + + /// The maximum length of the issuer string in bytes in a Keyless public key. + const MAX_ISSUER_UTF8_BYTES_LENGTH: u64 = 120; + + // + // Structs + // + + /// A Keyless public key, consisting of an OIDC issuer string `iss` and a 32-byte identifier commitment `idc`. + struct PublicKey has copy, drop, store { + iss: String, + idc: vector + } + + // + // Functions + // + + /// Parses the input bytes into a keyless public key. + public fun new_public_key_from_bytes(bytes: vector): PublicKey { + let stream = bcs_stream::new(bytes); + let key = deserialize_public_key(&mut stream); + assert!(!bcs_stream::has_remaining(&mut stream), error::invalid_argument(E_INVALID_KEYLESS_PUBLIC_KEY_EXTRA_BYTES)); + key + } + + /// Deserializes a keyless public key from a BCS stream. + public fun deserialize_public_key(stream: &mut bcs_stream::BCSStream): PublicKey { + let iss = bcs_stream::deserialize_string(stream); + let idc = bcs_stream::deserialize_vector(stream, |x| deserialize_u8(x)); + new(iss, idc) + } + + /// Creates a new keyless public key from an issuer string and identifier commitment bytes.
+ public fun new(iss: String, idc: vector): PublicKey { + assert!(string::bytes(&iss).length() <= MAX_ISSUER_UTF8_BYTES_LENGTH, error::invalid_argument(E_INVALID_ISSUER_UTF8_BYTES_LENGTH)); + assert!(idc.length() == ID_COMMITMENT_BYTES_LENGTH, error::invalid_argument(E_INVALID_ID_COMMITMENT_BYTES_LENGTH)); + PublicKey { iss, idc } + } + + /// Returns the issuer string of the public key + friend fun get_iss(self: &PublicKey): String { + self.iss + } + + /// Returns the identifier bytes of the public key + friend fun get_idc(self: &PublicKey): vector { + self.idc + } +} diff --git a/aptos-move/framework/aptos-stdlib/sources/cryptography/keyless.spec.move b/aptos-move/framework/aptos-stdlib/sources/cryptography/keyless.spec.move new file mode 100644 index 0000000000000..6ebc8cd045dc4 --- /dev/null +++ b/aptos-move/framework/aptos-stdlib/sources/cryptography/keyless.spec.move @@ -0,0 +1,5 @@ +spec aptos_std::keyless { + spec module { + pragma verify = false; + } +} diff --git a/aptos-move/framework/aptos-stdlib/sources/cryptography/multi_ed25519.move b/aptos-move/framework/aptos-stdlib/sources/cryptography/multi_ed25519.move index f1f97bc635720..e1f6b60e162b7 100644 --- a/aptos-move/framework/aptos-stdlib/sources/cryptography/multi_ed25519.move +++ b/aptos-move/framework/aptos-stdlib/sources/cryptography/multi_ed25519.move @@ -6,7 +6,6 @@ module aptos_std::multi_ed25519 { use std::error; use std::features; use std::option::{Self, Option}; - use std::vector; use aptos_std::ed25519; // @@ -127,7 +126,7 @@ module aptos_std::multi_ed25519 { /// leave it as is and continue assuming `UnvalidatedPublicKey` objects could be invalid PKs that will safely be /// rejected during signature verification. public fun new_unvalidated_public_key_from_bytes(bytes: vector): UnvalidatedPublicKey { - let len = vector::length(&bytes); + let len = bytes.length(); let num_sub_pks = len / INDIVIDUAL_PUBLIC_KEY_NUM_BYTES; assert!(num_sub_pks <= MAX_NUMBER_OF_PUBLIC_KEYS, error::invalid_argument(E_WRONG_PUBKEY_SIZE)); @@ -140,7 +139,7 @@ module aptos_std::multi_ed25519 { /// (Incorrectly) parses the input bytes as a *validated* MultiEd25519 public key. public fun new_validated_public_key_from_bytes(bytes: vector): Option { // Note that `public_key_validate_internal` will check that `vector::length(&bytes) / INDIVIDUAL_PUBLIC_KEY_NUM_BYTES <= MAX_NUMBER_OF_PUBLIC_KEYS`. - if (vector::length(&bytes) % INDIVIDUAL_PUBLIC_KEY_NUM_BYTES == THRESHOLD_SIZE_BYTES && + if (bytes.length() % INDIVIDUAL_PUBLIC_KEY_NUM_BYTES == THRESHOLD_SIZE_BYTES && public_key_validate_internal(bytes)) { option::some(ValidatedPublicKey { bytes @@ -167,7 +166,8 @@ module aptos_std::multi_ed25519 { /// Parses the input bytes as a purported MultiEd25519 multi-signature. public fun new_signature_from_bytes(bytes: vector): Signature { - assert!(vector::length(&bytes) % INDIVIDUAL_SIGNATURE_NUM_BYTES == BITMAP_NUM_OF_BYTES, error::invalid_argument(E_WRONG_SIGNATURE_SIZE)); + assert!( + bytes.length() % INDIVIDUAL_SIGNATURE_NUM_BYTES == BITMAP_NUM_OF_BYTES, error::invalid_argument(E_WRONG_SIGNATURE_SIZE)); Signature { bytes } } @@ -244,7 +244,7 @@ module aptos_std::multi_ed25519 { /// We provide this API as a cheaper alternative to calling `public_key_validate` and then `validated_public_key_num_sub_pks` /// when the input `pk` is known to be valid. 
public fun unvalidated_public_key_num_sub_pks(pk: &UnvalidatedPublicKey): u8 { - let len = vector::length(&pk.bytes); + let len = pk.bytes.length(); ((len / INDIVIDUAL_PUBLIC_KEY_NUM_BYTES) as u8) } @@ -263,15 +263,15 @@ module aptos_std::multi_ed25519 { /// Returns the number n of sub-PKs in a validated t-out-of-n MultiEd25519 PK. /// Since the format of this PK has been validated, the returned # of sub-PKs is guaranteed to be correct. public fun validated_public_key_num_sub_pks(pk: &ValidatedPublicKey): u8 { - let len = vector::length(&pk.bytes); + let len = pk.bytes.length(); ((len / INDIVIDUAL_PUBLIC_KEY_NUM_BYTES) as u8) } /// Returns the number t of sub-PKs in a validated t-out-of-n MultiEd25519 PK (i.e., the threshold). public fun validated_public_key_threshold(pk: &ValidatedPublicKey): u8 { - let len = vector::length(&pk.bytes); - let threshold_byte = *vector::borrow(&pk.bytes, len - 1); + let len = pk.bytes.length(); + let threshold_byte = pk.bytes[len - 1]; threshold_byte } @@ -280,14 +280,14 @@ module aptos_std::multi_ed25519 { /// (All `ValidatedPublicKey` objects are guaranteed to pass this check.) /// Returns the threshold t <= n of the PK. public fun check_and_get_threshold(bytes: vector): Option { - let len = vector::length(&bytes); + let len = bytes.length(); if (len == 0) { return option::none() }; let threshold_num_of_bytes = len % INDIVIDUAL_PUBLIC_KEY_NUM_BYTES; let num_of_keys = len / INDIVIDUAL_PUBLIC_KEY_NUM_BYTES; - let threshold_byte = *vector::borrow(&bytes, len - 1); + let threshold_byte = bytes[len - 1]; if (num_of_keys == 0 || num_of_keys > MAX_NUMBER_OF_PUBLIC_KEYS || threshold_num_of_bytes != 1) { return option::none() @@ -300,7 +300,7 @@ module aptos_std::multi_ed25519 { /// Derives the Aptos-specific authentication key of the given Ed25519 public key. fun public_key_bytes_to_authentication_key(pk_bytes: vector): vector { - vector::push_back(&mut pk_bytes, SIGNATURE_SCHEME_ID); + pk_bytes.push_back(SIGNATURE_SCHEME_ID); std::hash::sha3_256(pk_bytes) } @@ -358,8 +358,8 @@ module aptos_std::multi_ed25519 { #[test_only] public fun maul_first_signature(sig: &mut Signature) { - let first_sig_byte = vector::borrow_mut(&mut sig.bytes, 0); - *first_sig_byte = *first_sig_byte ^ 0xff; + let first_sig_byte = sig.bytes.borrow_mut(0); + *first_sig_byte ^= 0xff; } @@ -367,18 +367,18 @@ module aptos_std::multi_ed25519 { fun bugfix_validated_pk_from_zero_subpks(fx: signer) { features::change_feature_flags_for_testing(&fx, vector[ features::multi_ed25519_pk_validate_v2_feature()], vector[]); let bytes = vector[1u8]; - assert!(vector::length(&bytes) == 1, 1); + assert!(bytes.length() == 1, 1); // Try deserializing a MultiEd25519 `ValidatedPublicKey` with 0 Ed25519 sub-PKs and 1 threshold byte. // This would ideally NOT succeed, but it currently does. Regardless, such invalid PKs will be safely dismissed // during signature verification. let some = new_validated_public_key_from_bytes(bytes); - assert!(option::is_none(&check_and_get_threshold(bytes)), 1); // ground truth - assert!(option::is_some(&some), 2); // incorrect + assert!(check_and_get_threshold(bytes).is_none(), 1); // ground truth + assert!(some.is_some(), 2); // incorrect // In contrast, the v2 API will fail deserializing, as it should. 
let none = new_validated_public_key_from_bytes_v2(bytes); - assert!(option::is_none(&none), 3); + assert!(none.is_none(), 3); } #[test(fx = @std)] @@ -387,17 +387,17 @@ module aptos_std::multi_ed25519 { let (_, subpk) = ed25519::generate_keys(); let bytes = ed25519::validated_public_key_to_bytes(&subpk); - assert!(vector::length(&bytes) == INDIVIDUAL_PUBLIC_KEY_NUM_BYTES, 1); + assert!(bytes.length() == INDIVIDUAL_PUBLIC_KEY_NUM_BYTES, 1); // Try deserializing a MultiEd25519 `ValidatedPublicKey` with 1 Ed25519 sub-PKs but no threshold byte, which // will not succeed, let none = new_validated_public_key_from_bytes(bytes); - assert!(option::is_none(&check_and_get_threshold(bytes)), 1); // ground truth - assert!(option::is_none(&none), 2); // correct + assert!(check_and_get_threshold(bytes).is_none(), 1); // ground truth + assert!(none.is_none(), 2); // correct // Similarly, the v2 API will also fail deserializing. let none = new_validated_public_key_from_bytes_v2(bytes); - assert!(option::is_none(&none), 3); // also correct + assert!(none.is_none(), 3); // also correct } #[test(fx = @std)] @@ -408,28 +408,28 @@ module aptos_std::multi_ed25519 { 0, 0, 1, ]; - assert!(option::extract(&mut check_and_get_threshold(torsion_point_with_threshold_1)) == 1, 1); + assert!(check_and_get_threshold(torsion_point_with_threshold_1).extract() == 1, 1); // Try deserializing a MultiEd25519 `ValidatedPublicKey` with 1 Ed25519 sub-PKs and 1 threshold byte, as it should, // except the sub-PK is of small order. This should not succeed, let none = new_validated_public_key_from_bytes(torsion_point_with_threshold_1); - assert!(option::is_none(&none), 2); + assert!(none.is_none(), 2); // Similarly, the v2 API will also fail deserializing. let none = new_validated_public_key_from_bytes_v2(torsion_point_with_threshold_1); - assert!(option::is_none(&none), 3); + assert!(none.is_none(), 3); } #[test] fun test_gen_sign_verify() { let thresholds = vector[1, 1, 2, 2, 3, 15,]; // the thresholds, implicitly encoded in the public keys let party_counts = vector[1, 2, 2, 3, 10, 32,]; - let test_case_count = vector::length(&party_counts); + let test_case_count = party_counts.length(); let test_case_idx = 0; while (test_case_idx < test_case_count) { - let threshold = *vector::borrow(&thresholds, test_case_idx); - let group_size = *vector::borrow(&party_counts, test_case_idx); + let threshold = thresholds[test_case_idx]; + let group_size = party_counts[test_case_idx]; let (sk, pk) = generate_keys(threshold, group_size); assert!(validated_public_key_threshold(&pk) == threshold, 1); @@ -437,7 +437,7 @@ module aptos_std::multi_ed25519 { assert!(public_key_validate_v2_internal(pk.bytes), 3); let upk = public_key_into_unvalidated(pk); - assert!(option::extract(&mut unvalidated_public_key_threshold(&upk)) == threshold, 4); + assert!(unvalidated_public_key_threshold(&upk).extract() == threshold, 4); assert!(unvalidated_public_key_num_sub_pks(&upk) == group_size, 5); let msg1 = b"Hello Aptos!"; @@ -451,7 +451,7 @@ module aptos_std::multi_ed25519 { let sig2 = sign_struct(&sk, copy obj2); assert!(signature_verify_strict_t(&sig2, &upk, copy obj2), 7); - test_case_idx = test_case_idx + 1; + test_case_idx += 1; } } @@ -463,7 +463,7 @@ module aptos_std::multi_ed25519 { assert!(public_key_validate_v2_internal(pk.bytes), 3); let upk = public_key_into_unvalidated(pk); - assert!(option::extract(&mut unvalidated_public_key_threshold(&upk)) == 4, 4); + assert!(unvalidated_public_key_threshold(&upk).extract() == 4, 4); 
assert!(unvalidated_public_key_num_sub_pks(&upk) == 5, 5); let msg1 = b"Hello Aptos!"; diff --git a/aptos-move/framework/aptos-stdlib/sources/cryptography/multi_key.move b/aptos-move/framework/aptos-stdlib/sources/cryptography/multi_key.move new file mode 100644 index 0000000000000..b341faf321f6d --- /dev/null +++ b/aptos-move/framework/aptos-stdlib/sources/cryptography/multi_key.move @@ -0,0 +1,89 @@ +/// This module implements the MultiKey type of public key. +/// A MultiKey public key is a collection of single key public keys and the number of signatures required to authenticate a transaction. +/// Unlike MultiEd25519, the individual single keys can be of different schemes. +module aptos_std::multi_key { + use aptos_std::single_key; + use std::hash; + use std::error; + use std::bcs_stream; + use std::bcs; + // + // Error codes + // + + /// No keys were provided when creating a MultiKey public key. + const E_INVALID_MULTI_KEY_NO_KEYS: u64 = 1; + + /// The number of keys provided is greater than the maximum allowed. + const E_INVALID_MULTI_KEY_TOO_MANY_KEYS: u64 = 2; + + /// The number of signatures required is greater than the number of keys provided. + const E_INVALID_MULTI_KEY_SIGNATURES_REQUIRED: u64 = 3; + + /// There are extra bytes in the input when deserializing a MultiKey public key. + const E_INVALID_MULTI_KEY_EXTRA_BYTES: u64 = 4; + + // + // Constants + // + + /// The identifier of the MultiKey signature scheme, which is used when deriving Aptos authentication keys by hashing + /// it together with a MultiKey public key. + const SIGNATURE_SCHEME_ID: u8 = 3; + + /// Max number of single key public keys allowed in a MultiKey public key + const MAX_NUMBER_OF_PUBLIC_KEYS: u64 = 32; + + /// An *unvalidated*, k out of n MultiKey public key: a vector of single key public keys and + /// the number of signatures required (the threshold k). + /// *Unvalidated* means there is no guarantee that the underlying PKs are valid elliptic curve points of non-small + /// order. Nor is there a guarantee that it would deserialize correctly (e.g., for Keyless public keys). + struct MultiKey has copy, drop, store { + public_keys: vector, + signatures_required: u8 + } + + // + // Functions + // + + /// Parses the input bytes into a MultiKey public key. + public fun new_public_key_from_bytes(bytes: vector): MultiKey { + let stream = bcs_stream::new(bytes); + let pk = deserialize_multi_key(&mut stream); + assert!(!bcs_stream::has_remaining(&mut stream), error::invalid_argument(E_INVALID_MULTI_KEY_EXTRA_BYTES)); + pk + } + + /// Creates a new MultiKey public key from a vector of single key public keys and the number of signatures required to authenticate a transaction. + public fun new_multi_key_from_single_keys(single_keys: vector, signatures_required: u8): MultiKey { + let num_keys = single_keys.length(); + assert!( + num_keys > 0, + error::invalid_argument(E_INVALID_MULTI_KEY_NO_KEYS) + ); + assert!( + num_keys <= MAX_NUMBER_OF_PUBLIC_KEYS, + error::invalid_argument(E_INVALID_MULTI_KEY_TOO_MANY_KEYS) + ); + assert!( + (signatures_required as u64) <= num_keys, + error::invalid_argument(E_INVALID_MULTI_KEY_SIGNATURES_REQUIRED) + ); + MultiKey { public_keys: single_keys, signatures_required } + } + + /// Deserializes a MultiKey public key from a BCS stream.
+ public fun deserialize_multi_key(stream: &mut bcs_stream::BCSStream): MultiKey { + let public_keys = bcs_stream::deserialize_vector(stream, |x| single_key::deserialize_any_public_key(x)); + let signatures_required = bcs_stream::deserialize_u8(stream); + MultiKey { public_keys, signatures_required } + } + + /// Returns the authentication key for a MultiKey public key. + public fun to_authentication_key(self: &MultiKey): vector { + let pk_bytes = bcs::to_bytes(self); + pk_bytes.push_back(SIGNATURE_SCHEME_ID); + hash::sha3_256(pk_bytes) + } +} diff --git a/aptos-move/framework/aptos-stdlib/sources/cryptography/multi_key.spec.move b/aptos-move/framework/aptos-stdlib/sources/cryptography/multi_key.spec.move new file mode 100644 index 0000000000000..48ef610e895b4 --- /dev/null +++ b/aptos-move/framework/aptos-stdlib/sources/cryptography/multi_key.spec.move @@ -0,0 +1,5 @@ +spec aptos_std::multi_key { + spec module { + pragma verify = false; + } +} diff --git a/aptos-move/framework/aptos-stdlib/sources/cryptography/ristretto255.move b/aptos-move/framework/aptos-stdlib/sources/cryptography/ristretto255.move index 79905c5786eb8..5481a9c5d5a60 100644 --- a/aptos-move/framework/aptos-stdlib/sources/cryptography/ristretto255.move +++ b/aptos-move/framework/aptos-stdlib/sources/cryptography/ristretto255.move @@ -41,9 +41,6 @@ module aptos_std::ristretto255 { use std::features; use std::option::Option; - #[test_only] - use std::option; - // // Constants // @@ -203,7 +200,7 @@ module aptos_std::ristretto255 { /// Samples a uniformly-at-random RistrettoPoint given a sequence of 64 uniformly-at-random bytes. This function /// can be used to build a collision-resistant hash function that maps 64-byte messages to RistrettoPoint's. public fun new_point_from_64_uniform_bytes(bytes: vector): Option { - if (std::vector::length(&bytes) == 64) { + if (bytes.length() == 64) { std::option::some(RistrettoPoint { handle: new_point_from_64_uniform_bytes_internal(bytes) }) @@ -322,9 +319,10 @@ module aptos_std::ristretto255 { /// Computes a multi-scalar multiplication, returning a_1 p_1 + a_2 p_2 + ... + a_n p_n. /// This function is much faster than computing each a_i p_i using `point_mul` and adding up the results using `point_add`. public fun multi_scalar_mul(points: &vector, scalars: &vector): RistrettoPoint { - assert!(!std::vector::is_empty(points), std::error::invalid_argument(E_ZERO_POINTS)); - assert!(!std::vector::is_empty(scalars), std::error::invalid_argument(E_ZERO_SCALARS)); - assert!(std::vector::length(points) == std::vector::length(scalars), std::error::invalid_argument(E_DIFFERENT_NUM_POINTS_AND_SCALARS)); + assert!(!points.is_empty(), std::error::invalid_argument(E_ZERO_POINTS)); + assert!(!scalars.is_empty(), std::error::invalid_argument(E_ZERO_SCALARS)); + assert!( + points.length() == scalars.length(), std::error::invalid_argument(E_DIFFERENT_NUM_POINTS_AND_SCALARS)); RistrettoPoint { handle: multi_scalar_mul_internal(points, scalars) @@ -364,9 +362,7 @@ module aptos_std::ristretto255 { /// Creates a Scalar from an u8. public fun new_scalar_from_u8(byte: u8): Scalar { let s = scalar_zero(); - let byte_zero = std::vector::borrow_mut(&mut s.data, 0); - *byte_zero = byte; - + s.data[0] = byte; s } @@ -393,7 +389,7 @@ module aptos_std::ristretto255 { /// Creates a Scalar from 32 bytes by reducing the little-endian-encoded number in those bytes modulo $\ell$. 
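To show how the `multi_key` module introduced above is intended to be used, here is a minimal test-only sketch (illustrative only, not part of the patch; the example module name is invented). It wraps two Ed25519 public keys via the `single_key` module added later in this patch, builds a 1-of-2 MultiKey, and derives its authentication key:

#[test_only]
module aptos_std::multi_key_usage_example {
    use aptos_std::ed25519;
    use aptos_std::multi_key;
    use aptos_std::single_key;

    #[test]
    fun demo_one_of_two() {
        // Wrap two Ed25519 public keys as unified single-key public keys.
        let (_, pk1) = ed25519::generate_keys();
        let (_, pk2) = ed25519::generate_keys();
        let keys = vector[
            single_key::from_ed25519_public_key_unvalidated(
                ed25519::new_unvalidated_public_key_from_bytes(ed25519::validated_public_key_to_bytes(&pk1))),
            single_key::from_ed25519_public_key_unvalidated(
                ed25519::new_unvalidated_public_key_from_bytes(ed25519::validated_public_key_to_bytes(&pk2))),
        ];
        // A 1-of-2 MultiKey: either sub-key may authenticate a transaction.
        let mk = multi_key::new_multi_key_from_single_keys(keys, 1);
        // The 32-byte authentication key is sha3-256(BCS(mk) || scheme id).
        let auth_key = multi_key::to_authentication_key(&mk);
        assert!(auth_key.length() == 32, 1);
    }
}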
public fun new_scalar_reduced_from_32_bytes(bytes: vector): Option { - if (std::vector::length(&bytes) == 32) { + if (bytes.length() == 32) { std::option::some(Scalar { data: scalar_reduced_from_32_bytes_internal(bytes) }) @@ -405,7 +401,7 @@ module aptos_std::ristretto255 { /// Samples a scalar uniformly-at-random given 64 uniform-at-random bytes as input by reducing the little-endian-encoded number /// in those bytes modulo $\ell$. public fun new_scalar_uniform_from_64_bytes(bytes: vector): Option { - if (std::vector::length(&bytes) == 64) { + if (bytes.length() == 64) { std::option::some(Scalar { data: scalar_uniform_from_64_bytes_internal(bytes) }) @@ -693,13 +689,13 @@ module aptos_std::ristretto255 { #[test] fun test_point_decompression() { let compressed = new_compressed_point_from_bytes(A_POINT); - assert!(std::option::is_some(&compressed), 1); + assert!(compressed.is_some(), 1); let point = new_point_from_bytes(A_POINT); - assert!(std::option::is_some(&point), 1); + assert!(point.is_some(), 1); - let point = std::option::extract(&mut point); - let compressed = std::option::extract(&mut compressed); + let point = point.extract(); + let compressed = compressed.extract(); let same_point = point_decompress(&compressed); assert!(point_equals(&point, &same_point), 1); @@ -708,8 +704,8 @@ module aptos_std::ristretto255 { #[test] fun test_point_equals() { let g = basepoint(); - let same_g = std::option::extract(&mut new_point_from_bytes(BASE_POINT)); - let ag = std::option::extract(&mut new_point_from_bytes(A_TIMES_BASE_POINT)); + let same_g = new_point_from_bytes(BASE_POINT).extract(); + let ag = new_point_from_bytes(A_TIMES_BASE_POINT).extract(); assert!(point_equals(&g, &same_g), 1); assert!(!point_equals(&g, &ag), 1); @@ -720,9 +716,9 @@ module aptos_std::ristretto255 { // fetch g let g = basepoint(); // fetch a - let a = std::option::extract(&mut new_scalar_from_bytes(A_SCALAR)); + let a = new_scalar_from_bytes(A_SCALAR).extract(); // fetch expected a*g - let ag = std::option::extract(&mut new_point_from_bytes(A_TIMES_BASE_POINT)); + let ag = new_point_from_bytes(A_TIMES_BASE_POINT).extract(); // compute a*g let p = point_mul(&g, &a); @@ -741,9 +737,9 @@ module aptos_std::ristretto255 { let g = basepoint(); assert!(g.handle == 0, 1); - let a = std::option::extract(&mut new_scalar_from_bytes(A_SCALAR)); + let a = new_scalar_from_bytes(A_SCALAR).extract(); - let ag = std::option::extract(&mut new_point_from_bytes(A_TIMES_BASE_POINT)); + let ag = new_point_from_bytes(A_TIMES_BASE_POINT).extract(); assert!(ag.handle == 1, 1); assert!(!point_equals(&g, &ag), 1); @@ -763,13 +759,13 @@ module aptos_std::ristretto255 { #[test] fun test_point_add() { // fetch a - let a = std::option::extract(&mut new_point_from_bytes(A_POINT)); + let a = new_point_from_bytes(A_POINT).extract(); // fetch b - let b = std::option::extract(&mut new_point_from_bytes(B_POINT)); + let b = new_point_from_bytes(B_POINT).extract(); // fetch expected a + b - let a_plus_b = std::option::extract(&mut new_point_from_bytes(A_PLUS_B_POINT)); + let a_plus_b = new_point_from_bytes(A_PLUS_B_POINT).extract(); // compute a*g let result = point_add(&a, &b); @@ -812,25 +808,25 @@ module aptos_std::ristretto255 { // create extra RistrettoPoints here, so as to generate different PointStore layouts inside the native Rust implementation let c = before_a_gap; while (c > 0) { - let _ignore = std::option::extract(&mut new_point_from_bytes(BASE_POINT)); + let _ignore = new_point_from_bytes(BASE_POINT).extract(); - c = c - 1; + c -= 1; }; 
// fetch a - let a = std::option::extract(&mut new_point_from_bytes(A_POINT)); + let a = new_point_from_bytes(A_POINT).extract(); // create extra RistrettoPoints here, so as to generate different PointStore layouts inside the native Rust implementation let c = before_b_gap; while (c > 0) { - let _ignore = std::option::extract(&mut new_point_from_bytes(BASE_POINT)); + let _ignore = new_point_from_bytes(BASE_POINT).extract(); - c = c - 1; + c -= 1; }; // fetch b - let b = std::option::extract(&mut new_point_from_bytes(B_POINT)); + let b = new_point_from_bytes(B_POINT).extract(); - let a_plus_b = std::option::extract(&mut new_point_from_bytes(A_PLUS_B_POINT)); + let a_plus_b = new_point_from_bytes(A_PLUS_B_POINT).extract(); // sanity-check the handles assert!(a.handle == before_a_gap, 1); @@ -856,13 +852,13 @@ module aptos_std::ristretto255 { #[test] fun test_point_sub() { // fetch a - let a = std::option::extract(&mut new_point_from_bytes(A_POINT)); + let a = new_point_from_bytes(A_POINT).extract(); // fetch b - let b = std::option::extract(&mut new_point_from_bytes(B_POINT)); + let b = new_point_from_bytes(B_POINT).extract(); // fetch expected a + b - let a_plus_b = std::option::extract(&mut new_point_from_bytes(A_PLUS_B_POINT)); + let a_plus_b = new_point_from_bytes(A_PLUS_B_POINT).extract(); // compute a*g let result = point_sub(&a_plus_b, &b); @@ -882,7 +878,7 @@ module aptos_std::ristretto255 { #[test] fun test_point_neg() { - let a = std::option::extract(&mut new_point_from_bytes(A_POINT)); + let a = new_point_from_bytes(A_POINT).extract(); let neg_a = point_neg(&a); @@ -909,10 +905,11 @@ module aptos_std::ristretto255 { fun test_basepoint_double_mul(fx: signer) { features::change_feature_flags_for_testing(&fx, vector[ features::get_bulletproofs_feature() ], vector[]); - let expected = option::extract(&mut new_point_from_bytes(x"be5d615d8b8f996723cdc6e1895b8b6d312cc75d1ffb0259873b99396a38c05a")); + let expected = new_point_from_bytes(x"be5d615d8b8f996723cdc6e1895b8b6d312cc75d1ffb0259873b99396a38c05a").extract( + ); let a = Scalar { data: A_SCALAR }; - let a_point = option::extract(&mut new_point_from_bytes(A_POINT)); + let a_point = new_point_from_bytes(A_POINT).extract(); let b = Scalar { data: B_SCALAR }; let actual = basepoint_double_mul(&a, &a_point, &b); @@ -958,7 +955,7 @@ module aptos_std::ristretto255 { ]; let result = multi_scalar_mul(&points, &scalars); - let expected = std::option::extract(&mut new_point_from_bytes(A_TIMES_BASE_POINT)); + let expected = new_point_from_bytes(A_TIMES_BASE_POINT).extract(); assert!(point_equals(&result, &expected), 1); } @@ -978,9 +975,9 @@ module aptos_std::ristretto255 { let result = multi_scalar_mul(&points, &scalars); let expected = basepoint_double_mul( - std::vector::borrow(&scalars, 0), + scalars.borrow(0), &basepoint(), - std::vector::borrow(&scalars, 1)); + scalars.borrow(1)); assert!(point_equals(&result, &expected), 1); } @@ -1003,7 +1000,8 @@ module aptos_std::ristretto255 { new_point_from_sha2_512(b"5"), ]; - let expected = std::option::extract(&mut new_point_from_bytes(x"c4a98fbe6bd0f315a0c150858aec8508be397443093e955ef982e299c1318928")); + let expected = new_point_from_bytes(x"c4a98fbe6bd0f315a0c150858aec8508be397443093e955ef982e299c1318928").extract( + ); let result = multi_scalar_mul(&points, &scalars); assert!(point_equals(&expected, &result), 1); @@ -1012,7 +1010,8 @@ module aptos_std::ristretto255 { #[test] fun test_new_point_from_sha2_512() { let msg = b"To really appreciate architecture, you may even need to commit a 
murder"; - let expected = option::extract(&mut new_point_from_bytes(x"baaa91eb43e5e2f12ffc96347e14bc458fdb1772b2232b08977ee61ea9f84e31")); + let expected = new_point_from_bytes(x"baaa91eb43e5e2f12ffc96347e14bc458fdb1772b2232b08977ee61ea9f84e31").extract( + ); assert!(point_equals(&expected, &new_point_from_sha2_512(msg)), 1); } @@ -1020,9 +1019,10 @@ module aptos_std::ristretto255 { #[test] fun test_new_point_from_64_uniform_bytes() { let bytes_64 = x"baaa91eb43e5e2f12ffc96347e14bc458fdb1772b2232b08977ee61ea9f84e31e87feda199d72b83de4f5b2d45d34805c57019c6c59c42cb70ee3d19aa996f75"; - let expected = option::extract(&mut new_point_from_bytes(x"4a8e429f906478654232d7ae180ad60854754944ac67f38e20d8fa79e4b7d71e")); + let expected = new_point_from_bytes(x"4a8e429f906478654232d7ae180ad60854754944ac67f38e20d8fa79e4b7d71e").extract( + ); - let point = option::extract(&mut new_point_from_64_uniform_bytes(bytes_64)); + let point = new_point_from_64_uniform_bytes(bytes_64).extract(); assert!(point_equals(&expected, &point), 1); } @@ -1046,31 +1046,31 @@ module aptos_std::ristretto255 { /// Tests deserializing a Scalar from a sequence of canonical bytes fun test_scalar_from_canonical_bytes() { // Too few bytes - assert!(std::option::is_none(&new_scalar_from_bytes(x"00")), 1); + assert!(new_scalar_from_bytes(x"00").is_none(), 1); // 32 zero bytes are canonical - assert!(std::option::is_some(&new_scalar_from_bytes(x"0000000000000000000000000000000000000000000000000000000000000000")), 1); + assert!(new_scalar_from_bytes(x"0000000000000000000000000000000000000000000000000000000000000000").is_some(), 1); // Non-canonical because unreduced - assert!(std::option::is_none(&new_scalar_from_bytes(x"1010101010101010101010101010101010101010101010101010101010101010")), 1); + assert!(new_scalar_from_bytes(x"1010101010101010101010101010101010101010101010101010101010101010").is_none(), 1); // Canonical because \ell - 1 - assert!(std::option::is_some(&new_scalar_from_bytes(L_MINUS_ONE)), 1); + assert!(new_scalar_from_bytes(L_MINUS_ONE).is_some(), 1); // Non-canonical because \ell - assert!(std::option::is_none(&new_scalar_from_bytes(ORDER_ELL)), 1); + assert!(new_scalar_from_bytes(ORDER_ELL).is_none(), 1); // Non-canonical because \ell+1 - assert!(std::option::is_none(&new_scalar_from_bytes(L_PLUS_ONE)), 1); + assert!(new_scalar_from_bytes(L_PLUS_ONE).is_none(), 1); // Non-canonical because \ell+2 - assert!(std::option::is_none(&new_scalar_from_bytes(L_PLUS_TWO)), 1); + assert!(new_scalar_from_bytes(L_PLUS_TWO).is_none(), 1); // Non-canonical because high bit is set let non_canonical_highbit = vector[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 128]; let non_canonical_highbit_hex = x"0000000000000000000000000000000000000000000000000000000000000080"; assert!(non_canonical_highbit == non_canonical_highbit_hex, 1); - assert!(std::option::is_none(&new_scalar_from_bytes(non_canonical_highbit)), 1); + assert!(new_scalar_from_bytes(non_canonical_highbit).is_none(), 1); } #[test] @@ -1108,9 +1108,9 @@ module aptos_std::ristretto255 { let inv = scalar_invert(&s); // Technically, there is a negligible probability (i.e., 1/2^\ell) that s was zero and the call above returned None - assert!(std::option::is_some(&inv), 1); + assert!(inv.is_some(), 1); - let inv = std::option::extract(&mut inv); + let inv = inv.extract(); // Multiply s with s^{-1} and make sure you get one assert!(scalar_is_one(&scalar_mul(&s, &inv)), 1); @@ -1121,12 +1121,12 @@ module aptos_std::ristretto255 { fun 
test_scalar_from_sha2_512() { // Test a specific message hashes correctly to the field let str: vector = vector[]; - std::vector::append(&mut str, b"To really appreciate architecture, you may even need to commit a murder."); - std::vector::append(&mut str, b"While the programs used for The Manhattan Transcripts are of the most extreme"); - std::vector::append(&mut str, b"nature, they also parallel the most common formula plot: the archetype of"); - std::vector::append(&mut str, b"murder. Other phantasms were occasionally used to underline the fact that"); - std::vector::append(&mut str, b"perhaps all architecture, rather than being about functional standards, is"); - std::vector::append(&mut str, b"about love and death."); + str.append(b"To really appreciate architecture, you may even need to commit a murder."); + str.append(b"While the programs used for The Manhattan Transcripts are of the most extreme"); + str.append(b"nature, they also parallel the most common formula plot: the archetype of"); + str.append(b"murder. Other phantasms were occasionally used to underline the fact that"); + str.append(b"perhaps all architecture, rather than being about functional standards, is"); + str.append(b"about love and death."); let s = new_scalar_from_sha2_512(str); @@ -1143,21 +1143,21 @@ module aptos_std::ristretto255 { #[test] fun test_scalar_invert() { // Cannot invert zero - assert!(std::option::is_none(&scalar_invert(&scalar_zero())), 1); + assert!(scalar_invert(&scalar_zero()).is_none(), 1); // One's inverse is one let one = scalar_invert(&scalar_one()); - assert!(std::option::is_some(&one), 1); + assert!(one.is_some(), 1); - let one = std::option::extract(&mut one); + let one = one.extract(); assert!(scalar_is_one(&one), 1); // Test a random point X's inverse is correct let x = Scalar { data: X_SCALAR }; let xinv = scalar_invert(&x); - assert!(std::option::is_some(&xinv), 1); + assert!(xinv.is_some(), 1); - let xinv = std::option::extract(&mut xinv); + let xinv = xinv.extract(); let xinv_expected = Scalar { data: X_INV_SCALAR }; assert!(scalar_equals(&xinv, &xinv_expected), 1) @@ -1245,12 +1245,12 @@ module aptos_std::ristretto255 { #[test] fun test_scalar_reduced_from_32_bytes() { // \ell + 2 = 0 + 2 = 2 (modulo \ell) - let s = std::option::extract(&mut new_scalar_reduced_from_32_bytes(L_PLUS_TWO)); + let s = new_scalar_reduced_from_32_bytes(L_PLUS_TWO).extract(); let two = Scalar { data: TWO_SCALAR }; assert!(scalar_equals(&s, &two), 1); // Reducing the all 1's bit vector yields $(2^256 - 1) \mod \ell$ - let biggest = std::option::extract(&mut new_scalar_reduced_from_32_bytes(NON_CANONICAL_ALL_ONES)); + let biggest = new_scalar_reduced_from_32_bytes(NON_CANONICAL_ALL_ONES).extract(); assert!(scalar_equals(&biggest, &Scalar { data: REDUCED_2_256_MINUS_1_SCALAR }), 1); } @@ -1259,10 +1259,10 @@ module aptos_std::ristretto255 { // Test X + 2^256 * X reduces correctly let x_plus_2_to_256_times_x: vector = vector[]; - std::vector::append(&mut x_plus_2_to_256_times_x, X_SCALAR); - std::vector::append(&mut x_plus_2_to_256_times_x, X_SCALAR); + x_plus_2_to_256_times_x.append(X_SCALAR); + x_plus_2_to_256_times_x.append(X_SCALAR); - let reduced = std::option::extract(&mut new_scalar_uniform_from_64_bytes(x_plus_2_to_256_times_x)); + let reduced = new_scalar_uniform_from_64_bytes(x_plus_2_to_256_times_x).extract(); let expected = Scalar { data: REDUCED_X_PLUS_2_TO_256_TIMES_X_SCALAR }; assert!(scalar_equals(&reduced, &expected), 1) } @@ -1274,14 +1274,14 @@ module aptos_std::ristretto255 { // ...but 
if we maul it and set the high bit to 1, it is non-canonical let non_can = scalar_zero(); - let last_byte = std::vector::borrow_mut(&mut non_can.data, 31); + let last_byte = non_can.data.borrow_mut(31); *last_byte = 128; assert!(!scalar_is_canonical_internal(non_can.data), 1); // This test makes sure scalar_to_bytes does not return a mutable reference to a scalar's bits let non_can = scalar_zero(); let bytes = scalar_to_bytes(&scalar_zero()); - let last_byte = std::vector::borrow_mut(&mut bytes, 31); + let last_byte = bytes.borrow_mut(31); *last_byte = 128; assert!(scalar_is_canonical_internal(non_can.data), 1); assert!(scalar_equals(&non_can, &scalar_zero()), 1); @@ -1293,7 +1293,7 @@ module aptos_std::ristretto255 { let i = 0; while (i < limit) { point_identity(); - i = i + 1; + i += 1; } } @@ -1304,7 +1304,7 @@ module aptos_std::ristretto255 { let i = 0; while (i < limit) { point_identity(); - i = i + 1; + i += 1; } } } diff --git a/aptos-move/framework/aptos-stdlib/sources/cryptography/ristretto255_bulletproofs.move b/aptos-move/framework/aptos-stdlib/sources/cryptography/ristretto255_bulletproofs.move index 731ceabc74227..a222a19f34947 100644 --- a/aptos-move/framework/aptos-stdlib/sources/cryptography/ristretto255_bulletproofs.move +++ b/aptos-move/framework/aptos-stdlib/sources/cryptography/ristretto255_bulletproofs.move @@ -3,6 +3,10 @@ /// A Bulletproof-based zero-knowledge range proof is a proof that a Pedersen commitment /// $c = v G + r H$ commits to an $n$-bit value $v$ (i.e., $v \in [0, 2^n)$). Currently, this module only supports /// $n \in \{8, 16, 32, 64\}$ for the number of bits. +/// +/// The module also supports batch range proofs, allowing verification of multiple commitments in a single proof. +/// Each commitment in the batch must satisfy the same range constraint $v \in [0, 2^n)$, and the supported batch +/// sizes are limited to $\{1, 2, 4, 8, 16\}$. module aptos_std::ristretto255_bulletproofs { use std::error; use std::features; @@ -29,8 +33,17 @@ module aptos_std::ristretto255_bulletproofs { /// The range proof system only supports proving ranges of type $[0, 2^b)$ where $b \in \{8, 16, 32, 64\}$. const E_RANGE_NOT_SUPPORTED: u64 = 3; + /// The range proof system only supports batch sizes of 1, 2, 4, 8, and 16. + const E_BATCH_SIZE_NOT_SUPPORTED: u64 = 4; + + /// The vector lengths of values and blinding factors do not match. + const E_VECTOR_LENGTHS_MISMATCH: u64 = 5; + + /// The domain separation tag exceeded the 256-byte limit. + const E_DST_TOO_LONG: u64 = 6; + /// The native functions have not been rolled out yet. - const E_NATIVE_FUN_NOT_AVAILABLE: u64 = 4; + const E_NATIVE_FUN_NOT_AVAILABLE: u64 = 7; // // Structs @@ -71,26 +84,31 @@ module aptos_std::ristretto255_bulletproofs { /// /// WARNING: The DST check is VERY important for security as it prevents proofs computed for one application /// (a.k.a., a _domain_) with `dst_1` from verifying in a different application with `dst_2 != dst_1`. + /// + /// NOTE: currently, domain separation tags of size larger than 256 bytes are not supported. 
public fun verify_range_proof_pedersen(com: &pedersen::Commitment, proof: &RangeProof, num_bits: u64, dst: vector): bool { - assert!(features::bulletproofs_enabled(), error::invalid_state(E_NATIVE_FUN_NOT_AVAILABLE)); - - verify_range_proof_internal( - ristretto255::point_to_bytes(&pedersen::commitment_as_compressed_point(com)), + verify_range_proof( + pedersen::commitment_as_point(com), &ristretto255::basepoint(), &ristretto255::hash_to_point_base(), - proof.bytes, + proof, num_bits, dst ) } /// Verifies a zero-knowledge range proof that the value `v` committed in `com` (as v * val_base + r * rand_base, - /// for some randomness `r`) satisfies `v` in `[0, 2^num_bits)`. Only works for `num_bits` in `{8, 16, 32, 64}`. + /// for some randomness `r`) satisfies `v` in `[0, 2^num_bits)`. + /// + /// Only works for `num_bits` in `{8, 16, 32, 64}`. + /// + /// NOTE: currently, domain separation tags of size larger than 256 bytes are not supported. public fun verify_range_proof( com: &RistrettoPoint, val_base: &RistrettoPoint, rand_base: &RistrettoPoint, proof: &RangeProof, num_bits: u64, dst: vector): bool { assert!(features::bulletproofs_enabled(), error::invalid_state(E_NATIVE_FUN_NOT_AVAILABLE)); + assert!(dst.length() <= 256, error::invalid_argument(E_DST_TOO_LONG)); verify_range_proof_internal( ristretto255::point_to_bytes(&ristretto255::point_compress(com)), @@ -99,14 +117,68 @@ module aptos_std::ristretto255_bulletproofs { ) } + /// Verifies a zero-knowledge range proof for a batch of Pedersen commitments `comms` + /// (under the default Bulletproofs commitment key; see `pedersen::new_commitment_for_bulletproof`), + /// ensuring that all values `v` satisfy `v` in `[0, 2^num_bits)`. + /// Only works for `num_bits` in `{8, 16, 32, 64}` and batch size (length of `comms`) in `{1, 2, 4, 8, 16}`. + /// + /// NOTE: currently, domain separation tags of size larger than 256 bytes are not supported. + public fun verify_batch_range_proof_pedersen( + comms: &vector, proof: &RangeProof, + num_bits: u64, dst: vector): bool + { + verify_batch_range_proof( + &comms.map_ref(|com| ristretto255::point_clone(pedersen::commitment_as_point(com))), + &ristretto255::basepoint(), &ristretto255::hash_to_point_base(), + proof, + num_bits, + dst + ) + } + + // Verifies a zero-knowledge range proof for a batch of commitments `comms` (each of the form + /// `v * val_base + r * rand_base`), ensuring that all values `v` satisfy + /// `v` in `[0, 2^num_bits)`. Only works for `num_bits` in `{8, 16, 32, 64}` and batch size + /// (length of the `comms`) in `{1, 2, 4, 8, 16}`. + /// + /// NOTE: currently, domain separation tags of size larger than 256 bytes are not supported. + public fun verify_batch_range_proof( + comms: &vector, + val_base: &RistrettoPoint, rand_base: &RistrettoPoint, + proof: &RangeProof, num_bits: u64, dst: vector): bool + { + assert!(features::bulletproofs_batch_enabled(), error::invalid_state(E_NATIVE_FUN_NOT_AVAILABLE)); + assert!(dst.length() <= 256, error::invalid_argument(E_DST_TOO_LONG)); + + let comms = comms.map_ref(|com| ristretto255::point_to_bytes(&ristretto255::point_compress(com))); + + verify_batch_range_proof_internal( + comms, + val_base, rand_base, + proof.bytes, num_bits, dst + ) + } + #[test_only] /// Computes a range proof for the Pedersen commitment to 'val' with randomness 'r', under the default Bulletproofs /// commitment key; see `pedersen::new_commitment_for_bulletproof`. Returns the said commitment too. /// Only works for `num_bits` in `{8, 16, 32, 64}`. 
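A minimal end-to-end sketch of the new batch range-proof API, mirroring the test-only batch prover and the tests added further below in this file (illustrative only, not part of the patch; the module name, values, blinders, and DST are invented):

#[test_only]
module aptos_std::bulletproofs_batch_example {
    use std::features;
    use aptos_std::ristretto255;
    use aptos_std::ristretto255_bulletproofs as bulletproofs;

    #[test(fx = @std)]
    fun demo_batch_range_proof(fx: signer) {
        features::change_feature_flags_for_testing(
            &fx, vector[features::get_bulletproofs_batch_feature()], vector[]);

        // Two 8-bit values, each with its own blinding factor (batch size 2 is supported).
        let values = vector[
            ristretto255::new_scalar_from_u64(10),
            ristretto255::new_scalar_from_u64(200),
        ];
        let blinders = vector[
            ristretto255::new_scalar_from_sha2_512(b"example blinder one"),
            ristretto255::new_scalar_from_sha2_512(b"example blinder two"),
        ];

        // Prove, then verify, that both committed values lie in [0, 2^8).
        let (proof, comms) = bulletproofs::prove_batch_range_pedersen(&values, &blinders, 8, b"ExampleDST");
        assert!(bulletproofs::verify_batch_range_proof_pedersen(&comms, &proof, 8, b"ExampleDST"), 1);
    }
}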
public fun prove_range_pedersen(val: &Scalar, r: &Scalar, num_bits: u64, dst: vector): (RangeProof, pedersen::Commitment) { - let (bytes, compressed_comm) = prove_range_internal(scalar_to_bytes(val), scalar_to_bytes(r), num_bits, dst, &ristretto255::basepoint(), &ristretto255::hash_to_point_base()); + prove_range(val, r, &ristretto255::basepoint(), &ristretto255::hash_to_point_base(), num_bits, dst) + } + + #[test_only] + /// Computes a range proof for a commitment to `val` with randomness `r`. + /// The commitment is of the form `val * val_base + r * rand_base`. + /// Returns both the commitment and the corresponding range proof. Only works for `num_bits` in `{8, 16, 32, 64}`. + public fun prove_range( + val: &Scalar, r: &Scalar, + val_base: &RistrettoPoint, rand_base: &RistrettoPoint, + num_bits: u64, dst: vector): (RangeProof, pedersen::Commitment) + { + let (bytes, compressed_comm) = prove_range_internal(scalar_to_bytes(val), scalar_to_bytes(r), num_bits, dst, val_base, rand_base); let point = ristretto255::new_compressed_point_from_bytes(compressed_comm); - let point = &std::option::extract(&mut point); + let point = &point.extract(); ( RangeProof { bytes }, @@ -114,6 +186,43 @@ module aptos_std::ristretto255_bulletproofs { ) } + #[test_only] + /// Computes a range proof for a batch of values `vals`, each committed with the corresponding randomness in `rs`, + /// under the default Bulletproofs commitment key; see `pedersen::new_commitment_for_bulletproof`. + /// Returns a tuple containing the batch range proof and a vector of said commitments. + /// Only works for `num_bits` in `{8, 16, 32, 64}` and batch sizes (length of `vals` and `rs`) in `{1, 2, 4, 8, 16}`. + public fun prove_batch_range_pedersen( + vals: &vector, rs: &vector, + num_bits: u64, dst: vector): (RangeProof, vector) + { + prove_batch_range(vals, rs, &ristretto255::basepoint(), &ristretto255::hash_to_point_base(), num_bits, dst) + } + + #[test_only] + /// Computes a range proof for a batch of values `vals`, each committed with the corresponding randomness in `rs`, + /// using custom base points `val_base` and `rand_base`. Each commitment is of the form `val * val_base + r * rand_base`. + /// Returns a tuple containing the batch range proof and a vector of said commitments. + /// Only works for `num_bits` in `{8, 16, 32, 64}` and batch sizes (length of `vals` and `rs`) in `{1, 2, 4, 8, 16}`. + public fun prove_batch_range( + vals: &vector, rs: &vector, + val_base: &RistrettoPoint, rand_base: &RistrettoPoint, + num_bits: u64, dst: vector): (RangeProof, vector) + { + let vals = vals.map_ref(|val| scalar_to_bytes(val)); + let rs = rs.map_ref(|r| scalar_to_bytes(r)); + + let (bytes, compressed_comms) = prove_batch_range_internal(vals, rs, num_bits, dst, val_base, rand_base); + let comms = compressed_comms.map(|compressed_comm| { + let comm = pedersen::new_commitment_from_bytes(compressed_comm); + comm.extract() + }); + + ( + RangeProof { bytes }, + comms + ) + } + // // Native functions // @@ -129,6 +238,19 @@ module aptos_std::ristretto255_bulletproofs { num_bits: u64, dst: vector): bool; + /// Aborts with `error::invalid_argument(E_DESERIALIZE_RANGE_PROOF)` if `proof` is not a valid serialization of a + /// range proof. + /// Aborts with `error::invalid_argument(E_RANGE_NOT_SUPPORTED)` if an unsupported `num_bits` is provided. + /// Aborts with `error::invalid_argument(E_BATCH_SIZE_NOT_SUPPORTED)` if an unsupported batch size is provided. 
+ /// Aborts with `error::invalid_argument(E_VECTOR_LENGTHS_MISMATCH)` if the vector lengths of `comms` and `proof` do not match. + native fun verify_batch_range_proof_internal( + comms: vector>, + val_base: &RistrettoPoint, + rand_base: &RistrettoPoint, + proof: vector, + num_bits: u64, + dst: vector): bool; + #[test_only] /// Returns a tuple consisting of (1) a range proof for 'val' committed with randomness 'r' under the default Bulletproofs /// commitment key and (2) the commitment itself. @@ -143,38 +265,75 @@ module aptos_std::ristretto255_bulletproofs { val_base: &RistrettoPoint, rand_base: &RistrettoPoint): (vector, vector); + #[test_only] + /// Returns a tuple consisting of (1) a range proof for each value in `vals`, where each value is committed + /// with the corresponding randomness in `rs`, and (2) the corresponding commitments. + /// + /// Each commitment has the form `val * val_base + r * rand_base`, where `val` and `r` are the corresponding + /// elements from `vals` and `rs`, respectively. + /// + /// Aborts with `error::invalid_argument(E_RANGE_NOT_SUPPORTED)` if an unsupported `num_bits` is provided. + /// Aborts with `error::invalid_argument(E_VALUE_OUTSIDE_RANGE)` if `val_base` is not `num_bits` wide. + native fun prove_batch_range_internal( + vals: vector>, + rs: vector>, + num_bits: u64, + dst: vector, + val_base: &RistrettoPoint, + rand_base: &RistrettoPoint): (vector, vector>); + // // Testing // #[test_only] - use aptos_std::ristretto255::{Scalar, scalar_to_bytes, point_equals}; + use aptos_std::ristretto255::{Scalar, scalar_to_bytes}; + #[test_only] + use aptos_std::ristretto255_pedersen::commitment_equals; #[test_only] const A_DST: vector = b"AptosBulletproofs"; #[test_only] const A_VALUE: vector = x"870c2fa1b2e9ac45000000000000000000000000000000000000000000000000"; // i.e., 5020644638028926087u64 #[test_only] + const B_VALUE: vector = x"bb9d99fb7f9e572b000000000000000000000000000000000000000000000000"; // i.e., 3123139139123912123u64 + #[test_only] const A_BLINDER: vector = x"e7c7b42b75503bfc7b1932783786d227ebf88f79da752b68f6b865a9c179640c"; + #[test_only] + const B_BLINDER: vector = x"ce224fe5e1111a394fc254ee503aa2406706ef606efac6e2d0332711c7a7bc06"; // Pedersen commitment to A_VALUE with randomness A_BLINDER #[test_only] const A_COMM: vector = x"0a665260a4e42e575882c2cdcb3d0febd6cf168834f6de1e9e61e7b2e53dbf14"; + #[test_only] + const B_COMM: vector = x"748c244d880a1de3970a3d01670a04db6b74b9741bfec8732e512312384a6515"; // Range proof for A_COMM using domain-separation tag in A_DST, and MAX_RANGE_BITS #[test_only] const A_RANGE_PROOF_PEDERSEN: vector = 
x"d8d422d3fb9511d1942b78e3ec1a8c82fe1c01a0a690c55a4761e7e825633a753cca816667d2cbb716fe04a9c199cad748c2d4e59de4ed04fedf5f04f4341a74ae75b63c1997fd65d5fb3a8c03ad8771abe2c0a4f65d19496c11d948d6809503eac4d996f2c6be4e64ebe2df31102c96f106695bdf489dc9290c93b4d4b5411fb6298d0c33afa57e2e1948c38ef567268a661e7b1c099272e29591e717930a06a2c6e0e2d56aedea3078fd59334634f1a4543069865409eba074278f191039083102a9a0621791a9be09212a847e22061e083d7a712b05bca7274b25e4cb1201c679c4957f0842d7661fa1d3f5456a651e89112628b456026f8ad3a7abeaba3fec8031ec8b0392c0aa6c96205f7b21b0c2d6b5d064bd5bd1a1d91c41625d910688fa0dca35ec0f0e31a45792f8d6a330be970a22e1e0773111a083de893c89419ee7de97295978de90bcdf873a2826746809e64f9143417dbed09fa1c124e673febfed65c137cc45fabda963c96b64645802d1440cba5e58717e539f55f3321ab0c0f60410fba70070c5db500fee874265a343a2a59773fd150bcae09321a5166062e176e2e76bef0e3dd1a9250bcb7f4c971c10f0b24eb2a94e009b72c1fc21ee4267881e27b4edba8bed627ddf37e0c53cd425bc279d0c50d154d136503e54882e9541820d6394bd52ca2b438fd8c517f186fec0649c4846c4e43ce845d80e503dee157ce55392188039a7efc78719107ab989db8d9363b9dfc1946f01a84dbca5e742ed5f30b07ac61cf17ce2cf2c6a49d799ed3968a63a3ccb90d9a0e50960d959f17f202dd5cf0f2c375a8a702e063d339e48c0227e7cf710157f63f13136d8c3076c672ea2c1028fc1825366a145a4311de6c2cc46d3144ae3d2bc5808819b9817be3fce1664ecb60f74733e75e97ca8e567d1b81bdd4c56c7a340ba00"; + #[test_only] + const AB_BATCH_RANGE_PROOF_PEDERSEN: vector = x"103086c56ead10712514d2807c5605cb5f3a090566196549b5f03bedd7c1f450b4619bca9b00f87b2e039e844c24f9f2512901eea7f8f322f218f58c37186e1bd40ae74942f69b18f6806a536b2ab0793ab8e646eafc6e31d5219545dfcbb21334230c4e063e682d1f37fdfe7258d1735af1ba4764ca182803ef4566ddd386143550b83b8d686514988ee05bb7b4180f3b296a0a9711976365b678b537e2190c49cecded1d209ecec733e5cb85d5427f1f2ef1a44ebac41fdbf822692bd68b012515065faab0611aaabe87c1facbe68e648f2e2a0de6e5e81490dfa178546d0e1ec7a7c7ee6eb1e72f0e62b6a81abf23d4e4f946e5c5b28ca287d7ee30c72667ec1203ea9314a4ef182e3ed8a49700cb2452c3765fd29611e2abb5d8aa1970387452cd473383707a0b8e2eb46ba6826654e03ba5f73b56a0ae30012dc723576e76b280339600decef76eda350232ee9e53b373d745b958a19c8b4e7133f4b846727dab188441bb7d2484a73a9a83c1c94e7bea0ea0253418d3d5a751e63f940106e597772d169a01d93b495d10c08725c5d8cdef24306a164a2e1fa1b19eb0217239bbc661e0f1ead2bf3ecc3f178b6b49c61aa2c45f4832ba9ebc2744b79b413081e824b0978cab1934d29760f77751450e409da17941ff693b7dbc0b45d0659aeca05e1e92572fcd4c4d5846e7963e25cce6d54fc4a963da031747695a8e2000469e22e682e1b3f141891121d189504db63b4ab40e0d4c59f0b945b8188b79f0eb4916723a757bcfc787863ff28c5555c8ad93df81bba7b2ff9c164e180331a8b24cff4a9de0d2a8b71f73d24521781f0ced1a064698af138c00160c87eb7ffca5ab1d9a1bec5144c648c5f51a6093dbe8ed88a2fcaab4d5412c60ebb25827d8cab48787f705c5781e2ecd82939d3b3f864c21701fcecbc57b196db7c055273e86ac654a24016abd8ba7c6e87610a0e1b70ff57378992b2d5d45c963829b0aa9323b0dde3f02382e583cb3733c187b46903ed629820ec8043a8c18df42dc0a"; #[test(fx = @std)] #[expected_failure(abort_code = 0x010003, location = Self)] fun test_unsupported_ranges(fx: signer) { features::change_feature_flags_for_testing(&fx, vector[ features::get_bulletproofs_feature() ], vector[]); - let comm = ristretto255::new_point_from_bytes(A_COMM); - let comm = std::option::extract(&mut comm); - let comm = pedersen::commitment_from_point(comm); + let comm = pedersen::new_commitment_from_bytes(A_COMM); + let comm = comm.extract(); - assert!(verify_range_proof_pedersen( - &comm, - &range_proof_from_bytes(A_RANGE_PROOF_PEDERSEN), 10, A_DST), 1); + verify_range_proof_pedersen(&comm, 
&range_proof_from_bytes(A_RANGE_PROOF_PEDERSEN), 10, A_DST); + } + + #[test(fx = @std)] + #[expected_failure(abort_code = 0x010003, location = Self)] + fun test_unsupported_ranges_batch(fx: signer) { + features::change_feature_flags_for_testing(&fx, vector[ features::get_bulletproofs_batch_feature() ], vector[]); + + let comm_a = pedersen::new_commitment_from_bytes(A_COMM); + let comm_b = pedersen::new_commitment_from_bytes(B_COMM); + + let comms = vector[comm_a.extract(), comm_b.extract()]; + + verify_batch_range_proof_pedersen(&comms, &range_proof_from_bytes(AB_BATCH_RANGE_PROOF_PEDERSEN), 10, A_DST); } #[test(fx = @std)] @@ -183,7 +342,7 @@ module aptos_std::ristretto255_bulletproofs { let v = ristretto255::new_scalar_from_u64(59); let r = ristretto255::new_scalar_from_bytes(A_BLINDER); - let r = std::option::extract(&mut r); + let r = r.extract(); let num_bits = 8; let (proof, comm) = prove_range_pedersen(&v, &r, num_bits, A_DST); @@ -194,6 +353,65 @@ module aptos_std::ristretto255_bulletproofs { assert!(verify_range_proof_pedersen(&comm, &proof, num_bits, A_DST), 1); } + #[test(fx = @std)] + fun test_batch_prover(fx: signer) { + features::change_feature_flags_for_testing(&fx, vector[ features::get_bulletproofs_batch_feature() ], vector[]); + + let vs = vector[ + ristretto255::new_scalar_from_u64(59), + ristretto255::new_scalar_from_u64(60), + ]; + let rs = vector[ + ristretto255::new_scalar_from_bytes(A_BLINDER).extract(), + ristretto255::new_scalar_from_bytes(B_BLINDER).extract(), + ]; + let num_bits = 8; + + let (proof, comms) = prove_batch_range_pedersen(&vs, &rs, num_bits, A_DST); + + assert!(verify_batch_range_proof_pedersen(&comms, &proof, 64, A_DST) == false, 1); + assert!(verify_batch_range_proof_pedersen(&comms, &proof, 32, A_DST) == false, 1); + assert!(verify_batch_range_proof_pedersen(&comms, &proof, 16, A_DST) == false, 1); + assert!(verify_batch_range_proof_pedersen(&comms, &proof, num_bits, A_DST), 1); + } + + #[test(fx = @std)] + #[expected_failure(abort_code = 0x030007, location = Self)] + fun test_bulletproof_feature_disabled(fx: signer) { + features::change_feature_flags_for_testing(&fx, vector[ ], vector[ features::get_bulletproofs_feature()]); + + let v = ristretto255::new_scalar_from_u64(59); + let r = ristretto255::new_scalar_from_bytes(A_BLINDER); + let r = r.extract(); + let num_bits = 8; + + let (proof, comm) = prove_range_pedersen(&v, &r, num_bits, A_DST); + + // This will fail with error::invalid_state(E_NATIVE_FUN_NOT_AVAILABLE) + verify_range_proof_pedersen(&comm, &proof, num_bits, A_DST); + } + + #[test(fx = @std)] + #[expected_failure(abort_code = 0x030007, location = Self)] + fun test_bulletproof_batch_feature_disabled(fx: signer) { + features::change_feature_flags_for_testing(&fx, vector[ ], vector[ features::get_bulletproofs_batch_feature() ]); + + let vs = vector[ + ristretto255::new_scalar_from_u64(59), + ristretto255::new_scalar_from_u64(60), + ]; + let rs = vector[ + ristretto255::new_scalar_from_bytes(A_BLINDER).extract(), + ristretto255::new_scalar_from_bytes(B_BLINDER).extract(), + ]; + let num_bits = 8; + + let (proof, comms) = prove_batch_range_pedersen(&vs, &rs, num_bits, A_DST); + + // This will fail with error::invalid_state(E_NATIVE_FUN_NOT_AVAILABLE) + verify_batch_range_proof_pedersen(&comms, &proof, num_bits, A_DST); + } + #[test(fx = @std)] #[expected_failure(abort_code = 0x010001, location = Self)] fun test_empty_range_proof(fx: signer) { @@ -211,43 +429,184 @@ module aptos_std::ristretto255_bulletproofs { } #[test(fx = @std)] - fun 
test_valid_range_proof_verifies_against_comm(fx: signer) { + #[expected_failure(abort_code = 0x010001, location = Self)] + fun test_empty_batch_range_proof(fx: signer) { + features::change_feature_flags_for_testing(&fx, vector[ features::get_bulletproofs_batch_feature() ], vector[]); + + let proof = &range_proof_from_bytes(vector[ ]); + let num_bits = 64; + let comms = vector[pedersen::new_commitment_for_bulletproof( + &ristretto255::scalar_one(), + &ristretto255::new_scalar_from_sha2_512(b"hello random world") + )]; + + // This will fail with error::invalid_argument(E_DESERIALIZE_RANGE_PROOF) + verify_batch_range_proof_pedersen(&comms, proof, num_bits, A_DST); + } + + #[test(fx = @std)] + #[expected_failure(abort_code = 0x010002, location = Self)] + fun test_value_outside_range_range_proof(fx: signer) { features::change_feature_flags_for_testing(&fx, vector[ features::get_bulletproofs_feature() ], vector[]); + let value_a = ristretto255::new_scalar_from_bytes(A_VALUE); + let value_b = ristretto255::new_scalar_from_u128(1 << 65); + + let blinder_a = ristretto255::new_scalar_from_bytes(A_BLINDER); + let blinder_b = ristretto255::new_scalar_from_bytes(B_BLINDER); + + let values = vector[value_a.extract(), value_b]; + let blinders = vector[blinder_a.extract(), blinder_b.extract()]; + + // This will fail with error::invalid_argument(E_VALUE_OUTSIDE_RANGE) + prove_batch_range_pedersen(&values, &blinders, 64, A_DST); + } + + #[test(fx = @std)] + #[expected_failure(abort_code = 0x010002, location = Self)] + fun test_value_outside_range_batch_range_proof(fx: signer) { + features::change_feature_flags_for_testing(&fx, vector[ features::get_bulletproofs_batch_feature() ], vector[]); + + let value = ristretto255::new_scalar_from_u128(1 << 65); + let blinder = ristretto255::new_scalar_from_bytes(A_BLINDER).extract(); + + // This will fail with error::invalid_argument(E_VALUE_OUTSIDE_RANGE) + prove_range_pedersen(&value, &blinder, 64, A_DST); + } + + #[test(fx = @std)] + #[expected_failure(abort_code = 0x010004, location = Self)] + fun test_invalid_batch_size_range_proof(fx: signer) { + features::change_feature_flags_for_testing(&fx, vector[ features::get_bulletproofs_batch_feature() ], vector[]); + + let value_a = ristretto255::new_scalar_from_bytes(A_VALUE); + let value_b = ristretto255::new_scalar_from_bytes(B_VALUE); + let value_c = ristretto255::new_scalar_from_u32(1); + + let blinder_a = ristretto255::new_scalar_from_bytes(A_BLINDER); + let blinder_b = ristretto255::new_scalar_from_bytes(B_BLINDER); + let blinder_c = ristretto255::new_scalar_from_u32(1); + + let values = vector[ + value_a.extract(), + value_b.extract(), + value_c, + ]; + let blinders = vector[ + blinder_a.extract(), + blinder_b.extract(), + blinder_c, + ]; + + // This will fail with error::invalid_argument(E_BATCH_SIZE_NOT_SUPPORTED) + prove_batch_range_pedersen(&values, &blinders, 64, A_DST); + } + + #[test(fx = @std)] + #[expected_failure(abort_code = 0x010005, location = Self)] + fun test_invalid_args_batch_range_proof(fx: signer) { + features::change_feature_flags_for_testing(&fx, vector[ features::get_bulletproofs_batch_feature() ], vector[]); + + let value_a = ristretto255::new_scalar_from_bytes(A_VALUE); + let value_b = ristretto255::new_scalar_from_bytes(B_VALUE); + + let blinder_a = ristretto255::new_scalar_from_bytes(A_BLINDER); + + let values = vector[value_a.extract(), value_b.extract()]; + let blinders = vector[blinder_a.extract()]; + + // This will fail with error::invalid_argument(E_VECTOR_LENGTHS_MISMATCH) + 
prove_batch_range_pedersen(&values, &blinders, 64, A_DST); + } + + #[test(fx = @std)] + fun test_valid_range_proof_verifies_against_comm(fx: signer) { + features::change_feature_flags_for_testing(&fx, vector[ features::get_bulletproofs_batch_feature() ], vector[]); + let value = ristretto255::new_scalar_from_bytes(A_VALUE); - let value = std::option::extract(&mut value); + let value = value.extract(); let blinder = ristretto255::new_scalar_from_bytes(A_BLINDER); - let blinder = std::option::extract(&mut blinder); + let blinder = blinder.extract(); let comm = pedersen::new_commitment_for_bulletproof(&value, &blinder); - let expected_comm = std::option::extract(&mut ristretto255::new_point_from_bytes(A_COMM)); - assert!(point_equals(pedersen::commitment_as_point(&comm), &expected_comm), 1); + let expected_comm = pedersen::new_commitment_from_bytes(A_COMM).extract(); + assert!(commitment_equals(&comm, &expected_comm), 1); assert!(verify_range_proof_pedersen( &comm, &range_proof_from_bytes(A_RANGE_PROOF_PEDERSEN), MAX_RANGE_BITS, A_DST), 1); } + #[test(fx = @std)] + fun test_valid_batch_range_proof_verifies_against_comm(fx: signer) { + features::change_feature_flags_for_testing(&fx, vector[ features::get_bulletproofs_batch_feature() ], vector[]); + + let value_a = ristretto255::new_scalar_from_bytes(A_VALUE); + let value_b = ristretto255::new_scalar_from_bytes(B_VALUE); + + let blinder_a = ristretto255::new_scalar_from_bytes(A_BLINDER); + let blinder_b = ristretto255::new_scalar_from_bytes(B_BLINDER); + + let values = vector[value_a.extract(), value_b.extract()]; + let blinders = vector[blinder_a.extract(), blinder_b.extract()]; + + let comms = values.zip_map( + blinders, + |val, blinder| { + pedersen::new_commitment_for_bulletproof(&val, &blinder) + } + ); + + assert!(commitment_equals(comms.borrow(0), &pedersen::new_commitment_from_bytes(A_COMM).extract() + ), 1); + assert!(commitment_equals(comms.borrow(1), &pedersen::new_commitment_from_bytes(B_COMM).extract() + ), 1); + + assert!(verify_batch_range_proof_pedersen( + &comms, + &range_proof_from_bytes(AB_BATCH_RANGE_PROOF_PEDERSEN), MAX_RANGE_BITS, A_DST), 1); + } + #[test(fx = @std)] fun test_invalid_range_proof_fails_verification(fx: signer) { features::change_feature_flags_for_testing(&fx, vector[ features::get_bulletproofs_feature() ], vector[]); - let comm = ristretto255::new_point_from_bytes(A_COMM); - let comm = std::option::extract(&mut comm); - let comm = pedersen::commitment_from_point(comm); + let comm = pedersen::new_commitment_from_bytes(A_COMM); + let comm = comm.extract(); // Take a valid proof... let range_proof_invalid = A_RANGE_PROOF_PEDERSEN; // ...and modify a byte in the middle of the proof - let pos = std::vector::length(&range_proof_invalid) / 2; - let byte = std::vector::borrow_mut(&mut range_proof_invalid, pos); - *byte = *byte + 1; + let pos = range_proof_invalid.length() / 2; + let byte = range_proof_invalid.borrow_mut(pos); + *byte += 1; assert!(verify_range_proof_pedersen( &comm, &range_proof_from_bytes(range_proof_invalid), MAX_RANGE_BITS, A_DST) == false, 1); } + + #[test(fx = @std)] + fun test_invalid_batch_range_proof_fails_verification(fx: signer) { + features::change_feature_flags_for_testing(&fx, vector[ features::get_bulletproofs_batch_feature() ], vector[]); + + let comm_a = pedersen::new_commitment_from_bytes(A_COMM); + let comm_b = pedersen::new_commitment_from_bytes(B_COMM); + + let comms = vector[comm_a.extract(), comm_b.extract()]; + + // Take a valid proof... 
+ let range_proof_invalid = AB_BATCH_RANGE_PROOF_PEDERSEN; + + // ...and modify a byte in the middle of the proof + let pos = range_proof_invalid.length() / 2; + range_proof_invalid[pos] += 1; + + assert!(verify_batch_range_proof_pedersen( + &comms, + &range_proof_from_bytes(range_proof_invalid), MAX_RANGE_BITS, A_DST) == false, 1); + } } diff --git a/aptos-move/framework/aptos-stdlib/sources/cryptography/ristretto255_bulletproofs.spec.move b/aptos-move/framework/aptos-stdlib/sources/cryptography/ristretto255_bulletproofs.spec.move index 0e442a84d0b1d..4a63b2009ff24 100644 --- a/aptos-move/framework/aptos-stdlib/sources/cryptography/ristretto255_bulletproofs.spec.move +++ b/aptos-move/framework/aptos-stdlib/sources/cryptography/ristretto255_bulletproofs.spec.move @@ -2,4 +2,8 @@ spec aptos_std::ristretto255_bulletproofs { spec verify_range_proof_internal { // TODO: temporary mockup. pragma opaque; } + + spec verify_batch_range_proof_internal { + pragma opaque; + } } diff --git a/aptos-move/framework/aptos-stdlib/sources/cryptography/ristretto255_elgamal.move b/aptos-move/framework/aptos-stdlib/sources/cryptography/ristretto255_elgamal.move index a6912e7b1414f..01daf643235d9 100644 --- a/aptos-move/framework/aptos-stdlib/sources/cryptography/ristretto255_elgamal.move +++ b/aptos-move/framework/aptos-stdlib/sources/cryptography/ristretto255_elgamal.move @@ -41,9 +41,9 @@ module aptos_std::ristretto255_elgamal { /// Creates a new public key from a serialized Ristretto255 point. public fun new_pubkey_from_bytes(bytes: vector): Option { let point = ristretto255::new_compressed_point_from_bytes(bytes); - if (std::option::is_some(&mut point)) { + if (point.is_some()) { let pk = CompressedPubkey { - point: std::option::extract(&mut point) + point: point.extract() }; std::option::some(pk) } else { @@ -69,19 +69,19 @@ module aptos_std::ristretto255_elgamal { /// Creates a new ciphertext from two serialized Ristretto255 points: the first 32 bytes store `r * G` while the /// next 32 bytes store `v * G + r * Y`, where `Y` is the public key. 
public fun new_ciphertext_from_bytes(bytes: vector): Option { - if(vector::length(&bytes) != 64) { + if(bytes.length() != 64) { return std::option::none() }; - let bytes_right = vector::trim(&mut bytes, 32); + let bytes_right = bytes.trim(32); let left_point = ristretto255::new_point_from_bytes(bytes); let right_point = ristretto255::new_point_from_bytes(bytes_right); - if (std::option::is_some(&mut left_point) && std::option::is_some(&mut right_point)) { + if (left_point.is_some::() && right_point.is_some::()) { std::option::some(Ciphertext { - left: std::option::extract(&mut left_point), - right: std::option::extract(&mut right_point) + left: left_point.extract::(), + right: right_point.extract::() }) } else { std::option::none() @@ -118,8 +118,8 @@ module aptos_std::ristretto255_elgamal { let bytes_left = ristretto255::point_to_bytes(&ristretto255::point_compress(&ct.left)); let bytes_right = ristretto255::point_to_bytes(&ristretto255::point_compress(&ct.right)); let bytes = vector::empty(); - vector::append(&mut bytes, bytes_left); - vector::append(&mut bytes, bytes_right); + bytes.append::(bytes_left); + bytes.append::(bytes_right); bytes } diff --git a/aptos-move/framework/aptos-stdlib/sources/cryptography/ristretto255_pedersen.move b/aptos-move/framework/aptos-stdlib/sources/cryptography/ristretto255_pedersen.move index 7a49a04041d3c..6e13fe05faf41 100644 --- a/aptos-move/framework/aptos-stdlib/sources/cryptography/ristretto255_pedersen.move +++ b/aptos-move/framework/aptos-stdlib/sources/cryptography/ristretto255_pedersen.move @@ -31,9 +31,9 @@ module aptos_std::ristretto255_pedersen { /// Creates a new public key from a serialized Ristretto255 point. public fun new_commitment_from_bytes(bytes: vector): Option { let point = ristretto255::new_point_from_bytes(bytes); - if (std::option::is_some(&mut point)) { + if (point.is_some()) { let comm = Commitment { - point: std::option::extract(&mut point) + point: point.extract() }; std::option::some(comm) } else { @@ -78,7 +78,7 @@ module aptos_std::ristretto255_pedersen { /// base used in the Bulletproofs library (i.e., `BULLETPROOF_DEFAULT_PEDERSEN_RAND_BASE`). public fun new_commitment_for_bulletproof(v: &Scalar, r: &Scalar): Commitment { let rand_base = ristretto255::new_point_from_bytes(BULLETPROOF_DEFAULT_PEDERSEN_RAND_BASE); - let rand_base = std::option::extract(&mut rand_base); + let rand_base = rand_base.extract(); Commitment { point: ristretto255::basepoint_double_mul(r, &rand_base, v) @@ -153,6 +153,6 @@ module aptos_std::ristretto255_pedersen { /// Bulletproof has a default choice for `g` and `h` and this function returns the default `h` as used in the /// Bulletproofs Move module. public fun randomness_base_for_bulletproof(): RistrettoPoint { - std::option::extract(&mut ristretto255::new_point_from_bytes(BULLETPROOF_DEFAULT_PEDERSEN_RAND_BASE)) + ristretto255::new_point_from_bytes(BULLETPROOF_DEFAULT_PEDERSEN_RAND_BASE).extract() } } diff --git a/aptos-move/framework/aptos-stdlib/sources/cryptography/secp256k1.move b/aptos-move/framework/aptos-stdlib/sources/cryptography/secp256k1.move index 8acf9368e7f9c..1b16dd55b5232 100644 --- a/aptos-move/framework/aptos-stdlib/sources/cryptography/secp256k1.move +++ b/aptos-move/framework/aptos-stdlib/sources/cryptography/secp256k1.move @@ -6,6 +6,9 @@ module aptos_std::secp256k1 { /// An error occurred while deserializing, for example due to wrong input size. const E_DESERIALIZE: u64 = 1; // This code must be the same, if ever returned from the native Rust implementation. 
+ /// Recovery ID needs to be either 0, 1, 2 or 3. If you are recovering from an (r, s, v) Ethereum signature, take its v value and, set the recovery_id as follows: if v == 27, set to 0, if v == 28, set to 1, if v == 37, set to 0, if v == 38, set to 1. + const E_BAD_RECOVERY_ID: u64 = 2; + /// The size of a secp256k1-based ECDSA public key, in bytes. const RAW_PUBLIC_KEY_NUM_BYTES: u64 = 64; //const COMPRESSED_PUBLIC_KEY_SIZE: u64 = 33; @@ -25,13 +28,13 @@ module aptos_std::secp256k1 { /// Constructs an ECDSASignature struct from the given 64 bytes. public fun ecdsa_signature_from_bytes(bytes: vector): ECDSASignature { - assert!(std::vector::length(&bytes) == SIGNATURE_NUM_BYTES, std::error::invalid_argument(E_DESERIALIZE)); + assert!(bytes.length() == SIGNATURE_NUM_BYTES, std::error::invalid_argument(E_DESERIALIZE)); ECDSASignature { bytes } } /// Constructs an ECDSARawPublicKey struct, given a 64-byte raw representation. public fun ecdsa_raw_public_key_from_64_bytes(bytes: vector): ECDSARawPublicKey { - assert!(std::vector::length(&bytes) == RAW_PUBLIC_KEY_NUM_BYTES, std::error::invalid_argument(E_DESERIALIZE)); + assert!(bytes.length() == RAW_PUBLIC_KEY_NUM_BYTES, std::error::invalid_argument(E_DESERIALIZE)); ECDSARawPublicKey { bytes } } @@ -45,18 +48,37 @@ module aptos_std::secp256k1 { sig.bytes } - /// Recovers the signer's raw (64-byte) public key from a secp256k1 ECDSA `signature` given the `recovery_id` and the signed - /// `message` (32 byte digest). + /// Recovers the signer's raw (64-byte) public key from a secp256k1 ECDSA `signature` given the (2-bit) `recovery_id` + /// and the signed `message` (32 byte digest). + /// + /// This recovery algorithm can only be used to check validity of a signature if the signer's public key (or its + /// hash) is known beforehand. When the algorithm returns a public key `pk`, this means that the signature in + /// `signature` verified on `message` under that `pk`. But, again, that is only meaningful if `pk` is the "right" + /// one (e.g., in Ethereum, the "right" `pk` is the one whose hash matches the account's address). + /// + /// If you do not understand this nuance, please learn more about ECDSA and pubkey recovery (see + /// https://alinush.github.io/ecdsa#pubkey-recovery), or you risk writing completely-insecure code. /// - /// Note that an invalid signature, or a signature from a different message, will result in the recovery of an - /// incorrect public key. This recovery algorithm can only be used to check validity of a signature if the signer's - /// public key (or its hash) is known beforehand. + /// Note: This function does not apply any additional hashing on the `message`; it simply passes in the message as + /// raw bytes to the ECDSA recovery function. (The max allowed size ~32 bytes.) + /// + Nonetheless, most applications will first hash the message to be signed. So, typically, `message` here tends + /// to be a hash rather than an actual message. Therefore, the developer should be aware of what hash function + /// was used for this. + /// + In particular, if using this function to verify an Ethereum signature, you will likely have to input + /// a keccak256 hash of the message as the `message` parameter. 
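As a concrete illustration of the recovery-id handling documented above, here is a small test-only sketch (not part of the patch; the module name is invented) that reuses the signature test vector from this file. For an Ethereum-style (r, s, v) signature, v == 27 maps to recovery_id 0 and v == 28 maps to recovery_id 1, per the doc comment above:

#[test_only]
module aptos_std::secp256k1_recover_example {
    use aptos_std::secp256k1;

    #[test]
    fun demo_recover() {
        // 64-byte (r || s) signature over the raw message b"test aptos secp256k1",
        // taken from the test vector below; its recovery_id is 0.
        let sig = secp256k1::ecdsa_signature_from_bytes(
            x"f7ad936da03f948c14c542020e3c5f4e02aaacd1f20427c11aa6e2fbf8776477646bba0e1a37f9e7c777c423a1d2849baafd7ff6a9930814a43c3f80d59db56f"
        );
        let pk = secp256k1::ecdsa_recover(b"test aptos secp256k1", 0, &sig);
        // Recovery succeeding is not verification by itself: the caller must still compare
        // the recovered key (or its hash) against the key it expects.
        assert!(pk.is_some(), 1);
        // Passing a recovery_id outside {0, 1, 2, 3} now aborts with E_BAD_RECOVERY_ID.
    }
}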
public fun ecdsa_recover( message: vector, recovery_id: u8, signature: &ECDSASignature, ): Option { + + // If recovery ID is not 0 or 1 or 2 or 3, help the caller out by aborting with `E_BAD_RECOVERY_ID` + if(recovery_id != 0 && recovery_id != 1 && recovery_id != 2 && recovery_id != 3) { + abort std::error::invalid_argument(E_BAD_RECOVERY_ID); + }; + let (pk, success) = ecdsa_recover_internal(message, recovery_id, signature.bytes); + if (success) { std::option::some(ecdsa_raw_public_key_from_64_bytes(pk)) } else { @@ -80,6 +102,17 @@ module aptos_std::secp256k1 { // Tests // + #[test] + #[expected_failure(abort_code = 65538, location = Self)] + /// Tests that bad recovery IDs get rejected + fun test_bad_ecdsa_recovery_id() { + let _ = ecdsa_recover( + b"test aptos secp256k1", + 4, + &ECDSASignature { bytes: x"f7ad936da03f948c14c542020e3c5f4e02aaacd1f20427c11aa6e2fbf8776477646bba0e1a37f9e7c777c423a1d2849baafd7ff6a9930814a43c3f80d59db56f" }, + ); + } + #[test] /// Test on a valid secp256k1 ECDSA signature created using sk = x"0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef" fun test_ecdsa_recover() { @@ -90,8 +123,9 @@ module aptos_std::secp256k1 { 0, &ECDSASignature { bytes: x"f7ad936da03f948c14c542020e3c5f4e02aaacd1f20427c11aa6e2fbf8776477646bba0e1a37f9e7c777c423a1d2849baafd7ff6a9930814a43c3f80d59db56f" }, ); - assert!(std::option::is_some(&pk), 1); - assert!(std::option::extract(&mut pk).bytes == x"4646ae5047316b4230d0086c8acec687f00b1cd9d1dc634f6cb358ac0a9a8ffffe77b4dd0a4bfb95851f3b7355c781dd60f8418fc8a65d14907aff47c903a559", 1); + assert!(pk.is_some(), 1); + assert!( + pk.extract().bytes == x"4646ae5047316b4230d0086c8acec687f00b1cd9d1dc634f6cb358ac0a9a8ffffe77b4dd0a4bfb95851f3b7355c781dd60f8418fc8a65d14907aff47c903a559", 1); // Flipped bits; Signature stays valid let pk = ecdsa_recover( @@ -100,8 +134,9 @@ module aptos_std::secp256k1 { // NOTE: A '7' was flipped to an 'f' here &ECDSASignature { bytes: x"f7ad936da03f948c14c542020e3c5f4e02aaacd1f20427c11aa6e2fbf8776477646bba0e1a37f9e7c7f7c423a1d2849baafd7ff6a9930814a43c3f80d59db56f" }, ); - assert!(std::option::is_some(&pk), 1); - assert!(std::option::extract(&mut pk).bytes != x"4646ae5047316b4230d0086c8acec687f00b1cd9d1dc634f6cb358ac0a9a8ffffe77b4dd0a4bfb95851f3b7355c781dd60f8418fc8a65d14907aff47c903a559", 1); + assert!(pk.is_some(), 1); + assert!( + pk.extract().bytes != x"4646ae5047316b4230d0086c8acec687f00b1cd9d1dc634f6cb358ac0a9a8ffffe77b4dd0a4bfb95851f3b7355c781dd60f8418fc8a65d14907aff47c903a559", 1); // Flipped bits; Signature becomes invalid let pk = ecdsa_recover( @@ -109,6 +144,6 @@ module aptos_std::secp256k1 { 0, &ECDSASignature { bytes: x"ffad936da03f948c14c542020e3c5f4e02aaacd1f20427c11aa6e2fbf8776477646bba0e1a37f9e7c7f7c423a1d2849baafd7ff6a9930814a43c3f80d59db56f" }, ); - assert!(std::option::is_none(&pk), 1); + assert!(pk.is_none(), 1); } } diff --git a/aptos-move/framework/aptos-stdlib/sources/cryptography/secp256k1.spec.move b/aptos-move/framework/aptos-stdlib/sources/cryptography/secp256k1.spec.move index 38a1f0a565780..57da42544d901 100644 --- a/aptos-move/framework/aptos-stdlib/sources/cryptography/secp256k1.spec.move +++ b/aptos-move/framework/aptos-stdlib/sources/cryptography/secp256k1.spec.move @@ -24,6 +24,7 @@ spec aptos_std::secp256k1 { recovery_id: u8, signature: &ECDSASignature, ): Option { + aborts_if recovery_id > 3; aborts_if ecdsa_recover_internal_abort_condition(message, recovery_id, signature.bytes); let pk = spec_ecdsa_recover_internal_result_1(message, recovery_id, signature.bytes); 
let success = spec_ecdsa_recover_internal_result_2(message, recovery_id, signature.bytes); diff --git a/aptos-move/framework/aptos-stdlib/sources/cryptography/secp256r1.move b/aptos-move/framework/aptos-stdlib/sources/cryptography/secp256r1.move new file mode 100644 index 0000000000000..b16de628be270 --- /dev/null +++ b/aptos-move/framework/aptos-stdlib/sources/cryptography/secp256r1.move @@ -0,0 +1,27 @@ +/// This module implements ECDSA signatures based on the prime-order secp256r1 ellptic curve (i.e., cofactor is 1). + +module aptos_std::secp256r1 { + + /// An error occurred while deserializing, for example due to wrong input size. + const E_DESERIALIZE: u64 = 1; // This code must be the same, if ever returned from the native Rust implementation. + + /// The size of a secp256k1-based ECDSA public key, in bytes. + const RAW_PUBLIC_KEY_NUM_BYTES: u64 = 64; + //const COMPRESSED_PUBLIC_KEY_SIZE: u64 = 33; + + /// A 64-byte ECDSA public key. + struct ECDSARawPublicKey has copy, drop, store { + bytes: vector + } + + /// Constructs an ECDSARawPublicKey struct, given a 64-byte raw representation. + public fun ecdsa_raw_public_key_from_64_bytes(bytes: vector): ECDSARawPublicKey { + assert!(bytes.length() == RAW_PUBLIC_KEY_NUM_BYTES, std::error::invalid_argument(E_DESERIALIZE)); + ECDSARawPublicKey { bytes } + } + + /// Serializes an ECDSARawPublicKey struct to 64-bytes. + public fun ecdsa_raw_public_key_to_bytes(pk: &ECDSARawPublicKey): vector { + pk.bytes + } +} diff --git a/aptos-move/framework/aptos-stdlib/sources/cryptography/secp256r1.spec.move b/aptos-move/framework/aptos-stdlib/sources/cryptography/secp256r1.spec.move new file mode 100644 index 0000000000000..42efb98a6fb6b --- /dev/null +++ b/aptos-move/framework/aptos-stdlib/sources/cryptography/secp256r1.spec.move @@ -0,0 +1,5 @@ +spec aptos_std::secp256r1 { + spec module { + pragma verify = false; + } +} diff --git a/aptos-move/framework/aptos-stdlib/sources/cryptography/single_key.move b/aptos-move/framework/aptos-stdlib/sources/cryptography/single_key.move new file mode 100644 index 0000000000000..297646e3ab1ad --- /dev/null +++ b/aptos-move/framework/aptos-stdlib/sources/cryptography/single_key.move @@ -0,0 +1,116 @@ +/// This module implements Single Key representations of public keys. +/// It is used to represent public keys for the Ed25519, SECP256K1, WebAuthn, and Keyless schemes in a unified way. + +module aptos_std::single_key { + use aptos_std::bcs_stream::{Self, deserialize_u8}; + use aptos_std::ed25519; + use aptos_std::keyless; + use aptos_std::secp256k1; + use aptos_std::secp256r1; + use aptos_std::bcs; + use aptos_std::federated_keyless; + use std::error; + use std::hash; + + // Error codes + // + + /// Unrecognized public key type. + const E_INVALID_PUBLIC_KEY_TYPE: u64 = 1; + + /// There are extra bytes in the input when deserializing a Single Key public key. + const E_INVALID_SINGLE_KEY_EXTRA_BYTES: u64 = 2; + + // + // Constants + // + + /// The identifier of the Single Key signature scheme, which is used when deriving Aptos authentication keys by hashing + /// it together with an Single Key public key. + const SIGNATURE_SCHEME_ID: u8 = 2; + + /// Scheme identifier for Ed25519 single keys. + const ED25519_PUBLIC_KEY_TYPE: u8 = 0; + + /// Scheme identifier for SECP256K1 single keys. + const SECP256K1_PUBLIC_KEY_TYPE: u8 = 1; + + /// Scheme identifier for WebAuthn single keys. + const WEB_AUTHN_PUBLIC_KEY_TYPE: u8 = 2; + + /// Scheme identifier for Keyless single keys. 
+ const KEYLESS_PUBLIC_KEY_TYPE: u8 = 3; + + /// Scheme identifier for Federated Keyless single keys. + const FEDERATED_KEYLESS_PUBLIC_KEY_TYPE: u8 = 4; + + // + // Structs + // + + enum AnyPublicKey has copy, drop, store { + Ed25519{pk: ed25519::UnvalidatedPublicKey}, + Secp256k1Ecdsa{pk: secp256k1::ECDSARawPublicKey}, + Secp256r1Ecdsa{pk: secp256r1::ECDSARawPublicKey}, + Keyless{pk: keyless::PublicKey}, + FederatedKeyless{pk: federated_keyless::PublicKey} + } + + // + // Functions + // + + /// Parses the input bytes as a AnyPublicKey. The public key bytes are not guaranteed to be a valid + /// representation of a point on its corresponding curve if applicable. + /// It does check that the bytes deserialize into a well-formed public key for the given scheme. + public fun new_public_key_from_bytes(bytes: vector): AnyPublicKey { + let stream = bcs_stream::new(bytes); + let pk = deserialize_any_public_key(&mut stream); + assert!(!bcs_stream::has_remaining(&mut stream), error::invalid_argument(E_INVALID_SINGLE_KEY_EXTRA_BYTES)); + pk + } + + /// Deserializes a Single Key public key from a BCS stream. + public fun deserialize_any_public_key(stream: &mut bcs_stream::BCSStream): AnyPublicKey { + let scheme_id = bcs_stream::deserialize_u8(stream); + let pk: AnyPublicKey; + if (scheme_id == ED25519_PUBLIC_KEY_TYPE) { + let public_key_bytes = bcs_stream::deserialize_vector(stream, |x| deserialize_u8(x)); + pk = AnyPublicKey::Ed25519{pk: ed25519::new_unvalidated_public_key_from_bytes(public_key_bytes)} + } else if (scheme_id == SECP256K1_PUBLIC_KEY_TYPE) { + let public_key_bytes = bcs_stream::deserialize_vector(stream, |x| deserialize_u8(x)); + pk = AnyPublicKey::Secp256k1Ecdsa{pk: secp256k1::ecdsa_raw_public_key_from_64_bytes(public_key_bytes)}; + } else if (scheme_id == WEB_AUTHN_PUBLIC_KEY_TYPE) { + let public_key_bytes = bcs_stream::deserialize_vector(stream, |x| deserialize_u8(x)); + pk = AnyPublicKey::Secp256r1Ecdsa{pk: secp256r1::ecdsa_raw_public_key_from_64_bytes(public_key_bytes)}; + } else if (scheme_id == KEYLESS_PUBLIC_KEY_TYPE) { + pk = AnyPublicKey::Keyless{pk: keyless::deserialize_public_key(stream)}; + } else if (scheme_id == FEDERATED_KEYLESS_PUBLIC_KEY_TYPE) { + pk = AnyPublicKey::FederatedKeyless{pk: federated_keyless::deserialize_public_key(stream)} + } else { + abort error::invalid_argument(E_INVALID_PUBLIC_KEY_TYPE); + }; + pk + } + + /// Returns true if the public key is a keyless or federated keyless public key. + public fun is_keyless_or_federated_keyless_public_key(pk: &AnyPublicKey): bool { + match (pk) { + AnyPublicKey::Keyless { .. } => true, + AnyPublicKey::FederatedKeyless { .. } => true, + _ => false + } + } + + /// Converts an unvalidated Ed25519 public key to an AnyPublicKey. + public fun from_ed25519_public_key_unvalidated(pk: ed25519::UnvalidatedPublicKey): AnyPublicKey { + AnyPublicKey::Ed25519 { pk } + } + + /// Gets the authentication key for the AnyPublicKey. 
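A minimal sketch of how a caller might exercise the constructors and parser above, written as a hypothetical external test module (the module name and the 32-byte value are made up; Ed25519 keys are deliberately left unvalidated here). It assumes BCS encodes the enum as a ULEB128 variant index followed by the length-prefixed key bytes, which lines up with the scheme-id-plus-payload layout `new_public_key_from_bytes` parses.

```move
#[test_only]
module aptos_std::single_key_example {
    use aptos_std::ed25519;
    use aptos_std::single_key;
    use std::bcs;

    #[test]
    fun wrap_and_reparse_ed25519_single_key() {
        // Any 32 bytes are accepted because the Ed25519 key is unvalidated.
        let ed_pk = ed25519::new_unvalidated_public_key_from_bytes(
            x"0101010101010101010101010101010101010101010101010101010101010101"
        );
        let any_pk = single_key::from_ed25519_public_key_unvalidated(ed_pk);
        assert!(!single_key::is_keyless_or_federated_keyless_public_key(&any_pk), 0);

        // Variant index 0 (Ed25519) matches ED25519_PUBLIC_KEY_TYPE, so the BCS bytes of
        // the enum can be fed straight back into the parser and round-trip cleanly.
        let reparsed = single_key::new_public_key_from_bytes(bcs::to_bytes(&any_pk));
        assert!(reparsed == any_pk, 0);
    }
}
```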
+ public fun to_authentication_key(self: &AnyPublicKey): vector { + let pk_bytes = bcs::to_bytes(self); + pk_bytes.push_back(SIGNATURE_SCHEME_ID); + hash::sha3_256(pk_bytes) + } +} diff --git a/aptos-move/framework/aptos-stdlib/sources/cryptography/single_key.spec.move b/aptos-move/framework/aptos-stdlib/sources/cryptography/single_key.spec.move new file mode 100644 index 0000000000000..fe0ff02415508 --- /dev/null +++ b/aptos-move/framework/aptos-stdlib/sources/cryptography/single_key.spec.move @@ -0,0 +1,5 @@ +spec aptos_std::single_key { + spec module { + pragma verify = false; + } +} diff --git a/aptos-move/framework/aptos-stdlib/sources/data_structures/big_vector.move b/aptos-move/framework/aptos-stdlib/sources/data_structures/big_vector.move index a7eca39732823..1197affa7cd88 100644 --- a/aptos-move/framework/aptos-stdlib/sources/data_structures/big_vector.move +++ b/aptos-move/framework/aptos-stdlib/sources/data_structures/big_vector.move @@ -24,7 +24,7 @@ module aptos_std::big_vector { /// Regular Vector API /// Create an empty vector. - public(friend) fun empty(bucket_size: u64): BigVector { + friend fun empty(bucket_size: u64): BigVector { assert!(bucket_size > 0, error::invalid_argument(EZERO_BUCKET_SIZE)); BigVector { buckets: table_with_length::new(), @@ -34,270 +34,266 @@ module aptos_std::big_vector { } /// Create a vector of length 1 containing the passed in element. - public(friend) fun singleton(element: T, bucket_size: u64): BigVector { + friend fun singleton(element: T, bucket_size: u64): BigVector { let v = empty(bucket_size); - push_back(&mut v, element); + v.push_back(element); v } - /// Destroy the vector `v`. - /// Aborts if `v` is not empty. - public fun destroy_empty(v: BigVector) { - assert!(is_empty(&v), error::invalid_argument(EVECTOR_NOT_EMPTY)); - let BigVector { buckets, end_index: _, bucket_size: _ } = v; - table_with_length::destroy_empty(buckets); + /// Destroy the vector `self`. + /// Aborts if `self` is not empty. + public fun destroy_empty(self: BigVector) { + assert!(self.is_empty(), error::invalid_argument(EVECTOR_NOT_EMPTY)); + let BigVector { buckets, end_index: _, bucket_size: _ } = self; + buckets.destroy_empty(); } - /// Destroy the vector `v` if T has `drop` - public fun destroy(v: BigVector) { - let BigVector { buckets, end_index, bucket_size: _ } = v; + /// Destroy the vector `self` if T has `drop` + public fun destroy(self: BigVector) { + let BigVector { buckets, end_index, bucket_size: _ } = self; let i = 0; while (end_index > 0) { - let num_elements = vector::length(&table_with_length::remove(&mut buckets, i)); - end_index = end_index - num_elements; - i = i + 1; + let num_elements = buckets.remove(i).length(); + end_index -= num_elements; + i += 1; }; - table_with_length::destroy_empty(buckets); + buckets.destroy_empty(); } - /// Acquire an immutable reference to the `i`th element of the vector `v`. + /// Acquire an immutable reference to the `i`th element of the vector `self`. /// Aborts if `i` is out of bounds. - public fun borrow(v: &BigVector, i: u64): &T { - assert!(i < length(v), error::invalid_argument(EINDEX_OUT_OF_BOUNDS)); - vector::borrow(table_with_length::borrow(&v.buckets, i / v.bucket_size), i % v.bucket_size) + public fun borrow(self: &BigVector, i: u64): &T { + assert!(i < self.length(), error::invalid_argument(EINDEX_OUT_OF_BOUNDS)); + self.buckets.borrow(i / self.bucket_size).borrow(i % self.bucket_size) } - /// Return a mutable reference to the `i`th element in the vector `v`. 
+ /// Return a mutable reference to the `i`th element in the vector `self`. /// Aborts if `i` is out of bounds. - public fun borrow_mut(v: &mut BigVector, i: u64): &mut T { - assert!(i < length(v), error::invalid_argument(EINDEX_OUT_OF_BOUNDS)); - vector::borrow_mut(table_with_length::borrow_mut(&mut v.buckets, i / v.bucket_size), i % v.bucket_size) + public fun borrow_mut(self: &mut BigVector, i: u64): &mut T { + assert!(i < self.length(), error::invalid_argument(EINDEX_OUT_OF_BOUNDS)); + self.buckets.borrow_mut(i / self.bucket_size).borrow_mut(i % self.bucket_size) } - /// Empty and destroy the other vector, and push each of the elements in the other vector onto the lhs vector in the + /// Empty and destroy the other vector, and push each of the elements in the other vector onto the self vector in the /// same order as they occurred in other. /// Disclaimer: This function is costly. Use it at your own discretion. - public fun append(lhs: &mut BigVector, other: BigVector) { - let other_len = length(&other); + public fun append(self: &mut BigVector, other: BigVector) { + let other_len = other.length(); let half_other_len = other_len / 2; let i = 0; while (i < half_other_len) { - push_back(lhs, swap_remove(&mut other, i)); - i = i + 1; + self.push_back(other.swap_remove(i)); + i += 1; }; while (i < other_len) { - push_back(lhs, pop_back(&mut other)); - i = i + 1; + self.push_back(other.pop_back()); + i += 1; }; - destroy_empty(other); + other.destroy_empty(); } - /// Add element `val` to the end of the vector `v`. It grows the buckets when the current buckets are full. + /// Add element `val` to the end of the vector `self`. It grows the buckets when the current buckets are full. /// This operation will cost more gas when it adds new bucket. - public fun push_back(v: &mut BigVector, val: T) { - let num_buckets = table_with_length::length(&v.buckets); - if (v.end_index == num_buckets * v.bucket_size) { - table_with_length::add(&mut v.buckets, num_buckets, vector::empty()); - vector::push_back(table_with_length::borrow_mut(&mut v.buckets, num_buckets), val); + public fun push_back(self: &mut BigVector, val: T) { + let num_buckets = self.buckets.length(); + if (self.end_index == num_buckets * self.bucket_size) { + self.buckets.add(num_buckets, vector::empty()); + self.buckets.borrow_mut(num_buckets).push_back(val); } else { - vector::push_back(table_with_length::borrow_mut(&mut v.buckets, num_buckets - 1), val); + self.buckets.borrow_mut(num_buckets - 1).push_back(val); }; - v.end_index = v.end_index + 1; + self.end_index += 1; } - /// Pop an element from the end of vector `v`. It doesn't shrink the buckets even if they're empty. + /// Pop an element from the end of vector `self`. It doesn't shrink the buckets even if they're empty. /// Call `shrink_to_fit` explicity to deallocate empty buckets. - /// Aborts if `v` is empty. - public fun pop_back(v: &mut BigVector): T { - assert!(!is_empty(v), error::invalid_state(EVECTOR_EMPTY)); - let num_buckets = table_with_length::length(&v.buckets); - let last_bucket = table_with_length::borrow_mut(&mut v.buckets, num_buckets - 1); - let val = vector::pop_back(last_bucket); + /// Aborts if `self` is empty. + public fun pop_back(self: &mut BigVector): T { + assert!(!self.is_empty(), error::invalid_state(EVECTOR_EMPTY)); + let num_buckets = self.buckets.length(); + let last_bucket = self.buckets.borrow_mut(num_buckets - 1); + let val = last_bucket.pop_back(); // Shrink the table if the last vector is empty. 
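A small sketch of the bucket-growth behaviour described for `push_back` above, written in the style of the module's own `#[test]` functions (it has to live inside `big_vector` or a friend, since `empty` is now friend-only); the bucket size and values are arbitrary.

```move
#[test]
fun push_back_growth_sketch() {
    // With bucket_size = 2, the third push hits end_index == num_buckets * bucket_size,
    // so push_back allocates a second bucket before storing the element.
    let v = empty<u64>(2);
    v.push_back(10);
    v.push_back(20);
    v.push_back(30);
    assert!(v.length() == 3, 0);
    assert!(*v.borrow(2) == 30, 0);
    // pop_back removes the now-empty trailing bucket again.
    assert!(v.pop_back() == 30, 0);
    assert!(v.length() == 2, 0);
    v.destroy();
}
```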
- if (vector::is_empty(last_bucket)) { + if (last_bucket.is_empty()) { move last_bucket; - vector::destroy_empty(table_with_length::remove(&mut v.buckets, num_buckets - 1)); + self.buckets.remove(num_buckets - 1).destroy_empty(); }; - v.end_index = v.end_index - 1; + self.end_index -= 1; val } - /// Remove the element at index i in the vector v and return the owned value that was previously stored at i in v. + /// Remove the element at index i in the vector v and return the owned value that was previously stored at i in self. /// All elements occurring at indices greater than i will be shifted down by 1. Will abort if i is out of bounds. /// Disclaimer: This function is costly. Use it at your own discretion. - public fun remove(v: &mut BigVector, i: u64): T { - let len = length(v); + public fun remove(self: &mut BigVector, i: u64): T { + let len = self.length(); assert!(i < len, error::invalid_argument(EINDEX_OUT_OF_BOUNDS)); - let num_buckets = table_with_length::length(&v.buckets); - let cur_bucket_index = i / v.bucket_size + 1; - let cur_bucket = table_with_length::borrow_mut(&mut v.buckets, cur_bucket_index - 1); - let res = vector::remove(cur_bucket, i % v.bucket_size); - v.end_index = v.end_index - 1; + let num_buckets = self.buckets.length(); + let cur_bucket_index = i / self.bucket_size + 1; + let cur_bucket = self.buckets.borrow_mut(cur_bucket_index - 1); + let res = cur_bucket.remove(i % self.bucket_size); + self.end_index -= 1; move cur_bucket; while ({ spec { invariant cur_bucket_index <= num_buckets; - invariant table_with_length::spec_len(v.buckets) == num_buckets; + invariant table_with_length::spec_len(self.buckets) == num_buckets; }; (cur_bucket_index < num_buckets) }) { // remove one element from the start of current vector - let cur_bucket = table_with_length::borrow_mut(&mut v.buckets, cur_bucket_index); - let t = vector::remove(cur_bucket, 0); + let cur_bucket = self.buckets.borrow_mut(cur_bucket_index); + let t = cur_bucket.remove(0); move cur_bucket; // and put it at the end of the last one - let prev_bucket = table_with_length::borrow_mut(&mut v.buckets, cur_bucket_index - 1); - vector::push_back(prev_bucket, t); - cur_bucket_index = cur_bucket_index + 1; + let prev_bucket = self.buckets.borrow_mut(cur_bucket_index - 1); + prev_bucket.push_back(t); + cur_bucket_index += 1; }; spec { assert cur_bucket_index == num_buckets; }; // Shrink the table if the last vector is empty. - let last_bucket = table_with_length::borrow_mut(&mut v.buckets, num_buckets - 1); - if (vector::is_empty(last_bucket)) { + let last_bucket = self.buckets.borrow_mut(num_buckets - 1); + if (last_bucket.is_empty()) { move last_bucket; - vector::destroy_empty(table_with_length::remove(&mut v.buckets, num_buckets - 1)); + self.buckets.remove(num_buckets - 1).destroy_empty(); }; res } - /// Swap the `i`th element of the vector `v` with the last element and then pop the vector. + /// Swap the `i`th element of the vector `self` with the last element and then pop the vector. /// This is O(1), but does not preserve ordering of elements in the vector. /// Aborts if `i` is out of bounds. 
- public fun swap_remove(v: &mut BigVector, i: u64): T { - assert!(i < length(v), error::invalid_argument(EINDEX_OUT_OF_BOUNDS)); - let last_val = pop_back(v); + public fun swap_remove(self: &mut BigVector, i: u64): T { + assert!(i < self.length(), error::invalid_argument(EINDEX_OUT_OF_BOUNDS)); + let last_val = self.pop_back(); // if the requested value is the last one, return it - if (v.end_index == i) { + if (self.end_index == i) { return last_val }; // because the lack of mem::swap, here we swap remove the requested value from the bucket // and append the last_val to the bucket then swap the last bucket val back - let bucket = table_with_length::borrow_mut(&mut v.buckets, i / v.bucket_size); - let bucket_len = vector::length(bucket); - let val = vector::swap_remove(bucket, i % v.bucket_size); - vector::push_back(bucket, last_val); - vector::swap(bucket, i % v.bucket_size, bucket_len - 1); + let bucket = self.buckets.borrow_mut(i / self.bucket_size); + let bucket_len = bucket.length(); + let val = bucket.swap_remove(i % self.bucket_size); + bucket.push_back(last_val); + bucket.swap(i % self.bucket_size, bucket_len - 1); val } - /// Swap the elements at the i'th and j'th indices in the vector v. Will abort if either of i or j are out of bounds - /// for v. - public fun swap(v: &mut BigVector, i: u64, j: u64) { - assert!(i < length(v) && j < length(v), error::invalid_argument(EINDEX_OUT_OF_BOUNDS)); - let i_bucket_index = i / v.bucket_size; - let j_bucket_index = j / v.bucket_size; - let i_vector_index = i % v.bucket_size; - let j_vector_index = j % v.bucket_size; + /// Swap the elements at the i'th and j'th indices in the vector self. Will abort if either of i or j are out of bounds + /// for self. + public fun swap(self: &mut BigVector, i: u64, j: u64) { + assert!(i < self.length() && j < self.length(), error::invalid_argument(EINDEX_OUT_OF_BOUNDS)); + let i_bucket_index = i / self.bucket_size; + let j_bucket_index = j / self.bucket_size; + let i_vector_index = i % self.bucket_size; + let j_vector_index = j % self.bucket_size; if (i_bucket_index == j_bucket_index) { - vector::swap(table_with_length::borrow_mut(&mut v.buckets, i_bucket_index), i_vector_index, j_vector_index); + self.buckets.borrow_mut(i_bucket_index).swap(i_vector_index, j_vector_index); return }; // If i and j are in different buckets, take the buckets out first for easy mutation. - let bucket_i = table_with_length::remove(&mut v.buckets, i_bucket_index); - let bucket_j = table_with_length::remove(&mut v.buckets, j_bucket_index); + let bucket_i = self.buckets.remove(i_bucket_index); + let bucket_j = self.buckets.remove(j_bucket_index); // Get the elements from buckets by calling `swap_remove`. - let element_i = vector::swap_remove(&mut bucket_i, i_vector_index); - let element_j = vector::swap_remove(&mut bucket_j, j_vector_index); + let element_i = bucket_i.swap_remove(i_vector_index); + let element_j = bucket_j.swap_remove(j_vector_index); // Swap the elements and push back to the other bucket. - vector::push_back(&mut bucket_i, element_j); - vector::push_back(&mut bucket_j, element_i); - let last_index_in_bucket_i = vector::length(&bucket_i) - 1; - let last_index_in_bucket_j = vector::length(&bucket_j) - 1; + bucket_i.push_back(element_j); + bucket_j.push_back(element_i); + let last_index_in_bucket_i = bucket_i.length() - 1; + let last_index_in_bucket_j = bucket_j.length() - 1; // Re-position the swapped elements to the right index. 
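And a companion sketch for `swap_remove` above, again in the style of the module's own tests: the operation is O(1) because only the last element moves, so ordering is not preserved.

```move
#[test]
fun swap_remove_order_sketch() {
    let v = empty<u64>(2);
    v.push_back(1);
    v.push_back(2);
    v.push_back(3);
    // The last element (3) is popped and swapped into index 0; the old value (1) is returned.
    assert!(v.swap_remove(0) == 1, 0);
    assert!(*v.borrow(0) == 3, 0);
    assert!(*v.borrow(1) == 2, 0);
    assert!(v.length() == 2, 0);
    v.destroy();
}
```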
- vector::swap(&mut bucket_i, i_vector_index, last_index_in_bucket_i); - vector::swap(&mut bucket_j, j_vector_index, last_index_in_bucket_j); + bucket_i.swap(i_vector_index, last_index_in_bucket_i); + bucket_j.swap(j_vector_index, last_index_in_bucket_j); // Add back the buckets. - table_with_length::add(&mut v.buckets, i_bucket_index, bucket_i); - table_with_length::add(&mut v.buckets, j_bucket_index, bucket_j); + self.buckets.add(i_bucket_index, bucket_i); + self.buckets.add(j_bucket_index, bucket_j); } - /// Reverse the order of the elements in the vector v in-place. + /// Reverse the order of the elements in the vector self in-place. /// Disclaimer: This function is costly. Use it at your own discretion. - public fun reverse(v: &mut BigVector) { + public fun reverse(self: &mut BigVector) { let new_buckets = vector[]; let push_bucket = vector[]; - let num_buckets = table_with_length::length(&v.buckets); + let num_buckets = self.buckets.length(); let num_buckets_left = num_buckets; while (num_buckets_left > 0) { - let pop_bucket = table_with_length::remove(&mut v.buckets, num_buckets_left - 1); - vector::for_each_reverse(pop_bucket, |val| { - vector::push_back(&mut push_bucket, val); - if (vector::length(&push_bucket) == v.bucket_size) { - vector::push_back(&mut new_buckets, push_bucket); + let pop_bucket = self.buckets.remove(num_buckets_left - 1); + pop_bucket.for_each_reverse(|val| { + push_bucket.push_back(val); + if (push_bucket.length() == self.bucket_size) { + new_buckets.push_back(push_bucket); push_bucket = vector[]; }; }); - num_buckets_left = num_buckets_left - 1; + num_buckets_left -= 1; }; - if (vector::length(&push_bucket) > 0) { - vector::push_back(&mut new_buckets, push_bucket); + if (push_bucket.length() > 0) { + new_buckets.push_back(push_bucket); } else { - vector::destroy_empty(push_bucket); + push_bucket.destroy_empty(); }; - vector::reverse(&mut new_buckets); - let i = 0; - assert!(table_with_length::length(&v.buckets) == 0, 0); - while (i < num_buckets) { - table_with_length::add(&mut v.buckets, i, vector::pop_back(&mut new_buckets)); - i = i + 1; + new_buckets.reverse(); + assert!(self.buckets.length() == 0, 0); + for (i in 0..num_buckets) { + self.buckets.add(i, new_buckets.pop_back()); }; - vector::destroy_empty(new_buckets); + new_buckets.destroy_empty(); } - /// Return the index of the first occurrence of an element in v that is equal to e. Returns (true, index) if such an + /// Return the index of the first occurrence of an element in self that is equal to e. Returns (true, index) if such an /// element was found, and (false, 0) otherwise. /// Disclaimer: This function is costly. Use it at your own discretion. - public fun index_of(v: &BigVector, val: &T): (bool, u64) { - let num_buckets = table_with_length::length(&v.buckets); + public fun index_of(self: &BigVector, val: &T): (bool, u64) { + let num_buckets = self.buckets.length(); let bucket_index = 0; while (bucket_index < num_buckets) { - let cur = table_with_length::borrow(&v.buckets, bucket_index); - let (found, i) = vector::index_of(cur, val); + let cur = self.buckets.borrow(bucket_index); + let (found, i) = cur.index_of(val); if (found) { - return (true, bucket_index * v.bucket_size + i) + return (true, bucket_index * self.bucket_size + i) }; - bucket_index = bucket_index + 1; + bucket_index += 1; }; (false, 0) } - /// Return if an element equal to e exists in the vector v. + /// Return if an element equal to e exists in the vector self. /// Disclaimer: This function is costly. 
Use it at your own discretion. - public fun contains(v: &BigVector, val: &T): bool { - if (is_empty(v)) return false; - let (exist, _) = index_of(v, val); + public fun contains(self: &BigVector, val: &T): bool { + if (self.is_empty()) return false; + let (exist, _) = self.index_of(val); exist } /// Convert a big vector to a native vector, which is supposed to be called mostly by view functions to get an /// atomic view of the whole vector. /// Disclaimer: This function may be costly as the big vector may be huge in size. Use it at your own discretion. - public fun to_vector(v: &BigVector): vector { + public fun to_vector(self: &BigVector): vector { let res = vector[]; - let num_buckets = table_with_length::length(&v.buckets); - let i = 0; - while (i < num_buckets) { - vector::append(&mut res, *table_with_length::borrow(&v.buckets, i)); - i = i + 1; + let num_buckets = self.buckets.length(); + for (i in 0..num_buckets) { + res.append(*self.buckets.borrow(i)); }; res } /// Return the length of the vector. - public fun length(v: &BigVector): u64 { - v.end_index + public fun length(self: &BigVector): u64 { + self.end_index } /// Return `true` if the vector `v` has no elements and `false` otherwise. - public fun is_empty(v: &BigVector): bool { - length(v) == 0 + public fun is_empty(self: &BigVector): bool { + self.length() == 0 } #[test] @@ -305,37 +301,37 @@ module aptos_std::big_vector { let v = empty(5); let i = 0; while (i < 100) { - push_back(&mut v, i); - i = i + 1; + v.push_back(i); + i += 1; }; let j = 0; while (j < 100) { - let val = borrow(&v, j); + let val = v.borrow(j); assert!(*val == j, 0); - j = j + 1; + j += 1; }; while (i > 0) { - i = i - 1; - let (exist, index) = index_of(&v, &i); - let j = pop_back(&mut v); + i -= 1; + let (exist, index) = v.index_of(&i); + let j = v.pop_back(); assert!(exist, 0); assert!(index == i, 0); assert!(j == i, 0); }; while (i < 100) { - push_back(&mut v, i); - i = i + 1; + v.push_back(i); + i += 1; }; - let last_index = length(&v) - 1; - assert!(swap_remove(&mut v, last_index) == 99, 0); - assert!(swap_remove(&mut v, 0) == 0, 0); - while (length(&v) > 0) { + let last_index = v.length() - 1; + assert!(v.swap_remove(last_index) == 99, 0); + assert!(v.swap_remove(0) == 0, 0); + while (v.length() > 0) { // the vector is always [N, 1, 2, ... 
N-1] with repetitive swap_remove(&mut v, 0) - let expected = length(&v); - let val = swap_remove(&mut v, 0); + let expected = v.length(); + let val = v.swap_remove(0); assert!(val == expected, 0); }; - destroy_empty(v); + v.destroy_empty(); } #[test] @@ -344,13 +340,13 @@ module aptos_std::big_vector { let v2 = singleton(1u64, 7); let v3 = empty(6); let v4 = empty(8); - append(&mut v3, v4); - assert!(length(&v3) == 0, 0); - append(&mut v2, v3); - assert!(length(&v2) == 1, 0); - append(&mut v1, v2); - assert!(length(&v1) == 1, 0); - destroy(v1); + v3.append(v4); + assert!(v3.length() == 0, 0); + v2.append(v3); + assert!(v2.length() == 1, 0); + v1.append(v2); + assert!(v1.length() == 1, 0); + v1.destroy(); } #[test] @@ -359,21 +355,21 @@ module aptos_std::big_vector { let v2 = empty(7); let i = 0; while (i < 7) { - push_back(&mut v1, i); - i = i + 1; + v1.push_back(i); + i += 1; }; while (i < 25) { - push_back(&mut v2, i); - i = i + 1; + v2.push_back(i); + i += 1; }; - append(&mut v1, v2); - assert!(length(&v1) == 25, 0); + v1.append(v2); + assert!(v1.length() == 25, 0); i = 0; while (i < 25) { - assert!(*borrow(&v1, i) == i, 0); - i = i + 1; + assert!(*v1.borrow(i) == i, 0); + i += 1; }; - destroy(v1); + v1.destroy(); } #[test] @@ -381,16 +377,16 @@ module aptos_std::big_vector { let v1 = empty(7); let i = 0; while (i < 100) { - push_back(&mut v1, i); - i = i + 1; + v1.push_back(i); + i += 1; }; - let v2 = to_vector(&v1); + let v2 = v1.to_vector(); let j = 0; while (j < 100) { - assert!(*vector::borrow(&v2, j) == j, 0); - j = j + 1; + assert!(v2[j] == j, 0); + j += 1; }; - destroy(v1); + v1.destroy(); } #[test] @@ -398,32 +394,32 @@ module aptos_std::big_vector { let v = empty(11); let i = 0; while (i < 101) { - push_back(&mut v, i); - i = i + 1; + v.push_back(i); + i += 1; }; - remove(&mut v, 100); - remove(&mut v, 90); - remove(&mut v, 80); - remove(&mut v, 70); - remove(&mut v, 60); - remove(&mut v, 50); - remove(&mut v, 40); - remove(&mut v, 30); - remove(&mut v, 20); - remove(&mut v, 10); - remove(&mut v, 0); - assert!(length(&v) == 90, 0); + v.remove(100); + v.remove(90); + v.remove(80); + v.remove(70); + v.remove(60); + v.remove(50); + v.remove(40); + v.remove(30); + v.remove(20); + v.remove(10); + v.remove(0); + assert!(v.length() == 90, 0); let index = 0; i = 0; while (i < 101) { if (i % 10 != 0) { - assert!(*borrow(&v, index) == i, 0); - index = index + 1; + assert!(*v.borrow(index) == i, 0); + index += 1; }; - i = i + 1; + i += 1; }; - destroy(v); + v.destroy(); } #[test] @@ -431,20 +427,20 @@ module aptos_std::big_vector { let v = empty(11); let i = 0; while (i < 101) { - push_back(&mut v, i); - i = i + 1; + v.push_back(i); + i += 1; }; i = 0; while (i < 51) { - swap(&mut v, i, 100 - i); - i = i + 1; + v.swap(i, 100 - i); + i += 1; }; i = 0; while (i < 101) { - assert!(*borrow(&v, i) == 100 - i, 0); - i = i + 1; + assert!(*v.borrow(i) == 100 - i, 0); + i += 1; }; - destroy(v); + v.destroy(); } #[test] @@ -452,18 +448,18 @@ module aptos_std::big_vector { let v = empty(11); let i = 0; while (i < 100) { - push_back(&mut v, i); - let (found, idx) = index_of(&mut v, &i); + v.push_back(i); + let (found, idx) = v.index_of(&i); assert!(found && idx == i, 0); - i = i + 1; + i += 1; }; - destroy(v); + v.destroy(); } #[test] fun big_vector_empty_contains() { let v = empty(10); - assert!(!contains(&v, &(1 as u64)), 0); - destroy_empty(v); + assert!(!v.contains::(&(1 as u64)), 0); + v.destroy_empty(); } } diff --git 
a/aptos-move/framework/aptos-stdlib/sources/data_structures/big_vector.spec.move b/aptos-move/framework/aptos-stdlib/sources/data_structures/big_vector.spec.move index 5556d4d3d2a8f..62edbc8e4099b 100644 --- a/aptos-move/framework/aptos-stdlib/sources/data_structures/big_vector.spec.move +++ b/aptos-move/framework/aptos-stdlib/sources/data_structures/big_vector.spec.move @@ -42,85 +42,85 @@ spec aptos_std::big_vector { spec empty(bucket_size: u64): BigVector { aborts_if bucket_size == 0; - ensures length(result) == 0; + ensures result.length() == 0; ensures result.bucket_size == bucket_size; } spec singleton(element: T, bucket_size: u64): BigVector { aborts_if bucket_size == 0; - ensures length(result) == 1; + ensures result.length() == 1; ensures result.bucket_size == bucket_size; } - spec destroy_empty(v: BigVector) { - aborts_if !is_empty(v); + spec destroy_empty(self: BigVector) { + aborts_if !self.is_empty(); } - spec borrow(v: &BigVector, i: u64): &T { - aborts_if i >= length(v); - ensures result == spec_at(v, i); + spec borrow(self: &BigVector, i: u64): &T { + aborts_if i >= self.length(); + ensures result == spec_at(self, i); } - spec borrow_mut(v: &mut BigVector, i: u64): &mut T { - aborts_if i >= length(v); - ensures result == spec_at(v, i); + spec borrow_mut(self: &mut BigVector, i: u64): &mut T { + aborts_if i >= self.length(); + ensures result == spec_at(self, i); } - spec push_back(v: &mut BigVector, val: T) { - let num_buckets = spec_table_len(v.buckets); + spec push_back(self: &mut BigVector, val: T) { + let num_buckets = spec_table_len(self.buckets); include PushbackAbortsIf; - ensures length(v) == length(old(v)) + 1; - ensures v.end_index == old(v.end_index) + 1; - ensures spec_at(v, v.end_index-1) == val; - ensures forall i in 0..v.end_index-1: spec_at(v, i) == spec_at(old(v), i); - ensures v.bucket_size == old(v).bucket_size; + ensures self.length() == old(self).length() + 1; + ensures self.end_index == old(self.end_index) + 1; + ensures spec_at(self, self.end_index-1) == val; + ensures forall i in 0..self.end_index-1: spec_at(self, i) == spec_at(old(self), i); + ensures self.bucket_size == old(self).bucket_size; } spec schema PushbackAbortsIf { - v: BigVector; - let num_buckets = spec_table_len(v.buckets); - aborts_if num_buckets * v.bucket_size > MAX_U64; - aborts_if v.end_index + 1 > MAX_U64; + self: BigVector; + let num_buckets = spec_table_len(self.buckets); + aborts_if num_buckets * self.bucket_size > MAX_U64; + aborts_if self.end_index + 1 > MAX_U64; } - spec pop_back(v: &mut BigVector): T { - aborts_if is_empty(v); - ensures length(v) == length(old(v)) - 1; - ensures result == old(spec_at(v, v.end_index-1)); - ensures forall i in 0..v.end_index: spec_at(v, i) == spec_at(old(v), i); + spec pop_back(self: &mut BigVector): T { + aborts_if self.is_empty(); + ensures self.length() == old(self).length() - 1; + ensures result == old(spec_at(self, self.end_index-1)); + ensures forall i in 0..self.end_index: spec_at(self, i) == spec_at(old(self), i); } - spec swap_remove(v: &mut BigVector, i: u64): T { + spec swap_remove(self: &mut BigVector, i: u64): T { pragma verify_duration_estimate = 120; - aborts_if i >= length(v); - ensures length(v) == length(old(v)) - 1; - ensures result == spec_at(old(v), i); + aborts_if i >= self.length(); + ensures self.length() == old(self).length() - 1; + ensures result == spec_at(old(self), i); } - spec swap(v: &mut BigVector, i: u64, j: u64) { + spec swap(self: &mut BigVector, i: u64, j: u64) { pragma verify_duration_estimate = 1000; - 
aborts_if i >= length(v) || j >= length(v); - ensures length(v) == length(old(v)); - ensures spec_at(v, i) == spec_at(old(v), j); - ensures spec_at(v, j) == spec_at(old(v), i); - ensures forall idx in 0..length(v) + aborts_if i >= self.length() || j >= self.length(); + ensures self.length() == old(self).length(); + ensures spec_at(self, i) == spec_at(old(self), j); + ensures spec_at(self, j) == spec_at(old(self), i); + ensures forall idx in 0..self.length() where idx != i && idx != j: - spec_at(v, idx) == spec_at(old(v), idx); + spec_at(self, idx) == spec_at(old(self), idx); } - spec append(lhs: &mut BigVector, other: BigVector) { + spec append(self: &mut BigVector, other: BigVector) { pragma verify=false; } - spec remove(v: &mut BigVector, i: u64): T { + spec remove(self: &mut BigVector, i: u64): T { pragma verify=false; } - spec reverse(v: &mut BigVector) { + spec reverse(self: &mut BigVector) { pragma verify=false; } - spec index_of(v: &BigVector, val: &T): (bool, u64) { + spec index_of(self: &BigVector, val: &T): (bool, u64) { pragma verify=false; } diff --git a/aptos-move/framework/aptos-stdlib/sources/data_structures/smart_table.move b/aptos-move/framework/aptos-stdlib/sources/data_structures/smart_table.move index 60a9565d0a221..c69caef6fc998 100644 --- a/aptos-move/framework/aptos-stdlib/sources/data_structures/smart_table.move +++ b/aptos-move/framework/aptos-stdlib/sources/data_structures/smart_table.move @@ -4,6 +4,9 @@ /// when expanding to avoid unexpected gas cost. /// SmartTable uses faster hash function SipHash instead of cryptographically secure hash functions like sha3-256 since /// it tolerates collisions. +/// +/// DEPRECATED: since its implementation is inefficient, it +/// has been deprecated in favor of `big_ordered_map.move`. module aptos_std::smart_table { use std::error; use std::vector; @@ -72,7 +75,7 @@ module aptos_std::smart_table { ): SmartTable { assert!(split_load_threshold <= 100, error::invalid_argument(EINVALID_LOAD_THRESHOLD_PERCENT)); let buckets = table_with_length::new(); - table_with_length::add(&mut buckets, 0, vector::empty()); + buckets.add(0, vector::empty()); let table = SmartTable { buckets, num_buckets: 1, @@ -87,42 +90,38 @@ module aptos_std::smart_table { num_initial_buckets = 2; }; while (num_initial_buckets > 1) { - num_initial_buckets = num_initial_buckets - 1; - split_one_bucket(&mut table); + num_initial_buckets -= 1; + table.split_one_bucket(); }; table } /// Destroy empty table. /// Aborts if it's not empty. - public fun destroy_empty(table: SmartTable) { - assert!(table.size == 0, error::invalid_argument(ENOT_EMPTY)); - let i = 0; - while (i < table.num_buckets) { - vector::destroy_empty(table_with_length::remove(&mut table.buckets, i)); - i = i + 1; + public fun destroy_empty(self: SmartTable) { + assert!(self.size == 0, error::invalid_argument(ENOT_EMPTY)); + for (i in 0..self.num_buckets) { + self.buckets.remove(i).destroy_empty(); }; - let SmartTable { buckets, num_buckets: _, level: _, size: _, split_load_threshold: _, target_bucket_size: _ } = table; - table_with_length::destroy_empty(buckets); + let SmartTable { buckets, num_buckets: _, level: _, size: _, split_load_threshold: _, target_bucket_size: _ } = self; + buckets.destroy_empty(); } /// Destroy a table completely when V has `drop`. - public fun destroy(table: SmartTable) { - clear(&mut table); - destroy_empty(table); + public fun destroy(self: SmartTable) { + self.clear(); + self.destroy_empty(); } /// Clear a table completely when T has `drop`.
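For orientation, a minimal lifecycle sketch of the table API refactored above, written as a hypothetical external test module. The configuration values are arbitrary, and the parameter order of `new_with_config` (initial buckets, split load threshold, target bucket size) is an assumption, since only part of its signature appears in the hunk above.

```move
#[test_only]
module aptos_std::smart_table_example {
    use aptos_std::smart_table;

    #[test]
    fun lifecycle_sketch() {
        // Assumed order: new_with_config(num_initial_buckets, split_load_threshold, target_bucket_size).
        let t = smart_table::new_with_config<u64, u64>(4, 75, 4);
        t.add(1, 100);
        t.upsert(1, 101);                 // overwrites the existing entry
        assert!(*t.borrow(1) == 101, 0);
        assert!(t.remove(1) == 101, 0);
        t.destroy_empty();
    }
}
```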
- public fun clear(table: &mut SmartTable) { - *table_with_length::borrow_mut(&mut table.buckets, 0) = vector::empty(); - let i = 1; - while (i < table.num_buckets) { - table_with_length::remove(&mut table.buckets, i); - i = i + 1; + public fun clear(self: &mut SmartTable) { + *self.buckets.borrow_mut(0) = vector::empty(); + for (i in 1..self.num_buckets) { + self.buckets.remove(i); }; - table.num_buckets = 1; - table.level = 0; - table.size = 0; + self.num_buckets = 1; + self.level = 0; + self.size = 0; } /// Add (key, value) pair in the hash map, it may grow one bucket if current load factor exceeds the threshold. @@ -130,41 +129,41 @@ module aptos_std::smart_table { /// For standard linear hash algorithm, it is stored as a variable but `num_buckets` here could be leveraged. /// Abort if `key` already exists. /// Note: This method may occasionally cost much more gas when triggering bucket split. - public fun add(table: &mut SmartTable, key: K, value: V) { + public fun add(self: &mut SmartTable, key: K, value: V) { let hash = sip_hash_from_value(&key); - let index = bucket_index(table.level, table.num_buckets, hash); - let bucket = table_with_length::borrow_mut(&mut table.buckets, index); + let index = bucket_index(self.level, self.num_buckets, hash); + let bucket = self.buckets.borrow_mut(index); // We set a per-bucket limit here with a upper bound (10000) that nobody should normally reach. - assert!(vector::length(bucket) <= 10000, error::permission_denied(EEXCEED_MAX_BUCKET_SIZE)); - assert!(vector::all(bucket, | entry | { + assert!(bucket.length() <= 10000, error::permission_denied(EEXCEED_MAX_BUCKET_SIZE)); + assert!(bucket.all(| entry | { let e: &Entry = entry; &e.key != &key }), error::invalid_argument(EALREADY_EXIST)); let e = Entry { hash, key, value }; - if (table.target_bucket_size == 0) { + if (self.target_bucket_size == 0) { let estimated_entry_size = max(size_of_val(&e), 1); - table.target_bucket_size = max(1024 /* free_write_quota */ / estimated_entry_size, 1); + self.target_bucket_size = max(1024 /* free_write_quota */ / estimated_entry_size, 1); }; - vector::push_back(bucket, e); - table.size = table.size + 1; + bucket.push_back(e); + self.size += 1; - if (load_factor(table) >= (table.split_load_threshold as u64)) { - split_one_bucket(table); + if (self.load_factor() >= (self.split_load_threshold as u64)) { + self.split_one_bucket(); } } /// Add multiple key/value pairs to the smart table. The keys must not already exist. - public fun add_all(table: &mut SmartTable, keys: vector, values: vector) { - vector::zip(keys, values, |key, value| { add(table, key, value); }); + public fun add_all(self: &mut SmartTable, keys: vector, values: vector) { + keys.zip(values, |key, value| { self.add(key, value); }); } inline fun unzip_entries(entries: &vector>): (vector, vector) { let keys = vector[]; let values = vector[]; - vector::for_each_ref(entries, |e|{ + entries.for_each_ref(|e|{ let entry: &Entry = e; - vector::push_back(&mut keys, entry.key); - vector::push_back(&mut values, entry.value); + keys.push_back(entry.key); + values.push_back(entry.value); }); (keys, values) } @@ -173,14 +172,12 @@ module aptos_std::smart_table { /// view of the whole table. /// Disclaimer: This function may be costly as the smart table may be huge in size. Use it at your own discretion. 
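The split behaviour that `add` documents above can be observed directly; a sketch in the style of the module's own tests, with a threshold and bucket size chosen small so splits trigger quickly (values arbitrary).

```move
#[test]
fun split_growth_sketch() {
    // A tiny target bucket size plus a low split threshold makes load_factor()
    // reach split_load_threshold early, so add() keeps calling split_one_bucket.
    let t = new_with_config<u64, u64>(1, 50, 2);
    for (i in 0..20) {
        t.add(i, i);
    };
    assert!(t.length() == 20, 0);
    assert!(t.num_buckets() > 1, 0);
    t.destroy();
}
```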
public fun to_simple_map( - table: &SmartTable, + self: &SmartTable, ): SimpleMap { - let i = 0; let res = simple_map::new(); - while (i < table.num_buckets) { - let (keys, values) = unzip_entries(table_with_length::borrow(&table.buckets, i)); - simple_map::add_all(&mut res, keys, values); - i = i + 1; + for (i in 0..self.num_buckets) { + let (keys, values) = unzip_entries(self.buckets.borrow(i)); + res.add_all(keys, values); }; res } @@ -190,9 +187,9 @@ module aptos_std::smart_table { /// For a large enough smart table this function will fail due to execution gas limits, and /// `keys_paginated` should be used instead. public fun keys( - table_ref: &SmartTable + self: &SmartTable ): vector { - let (keys, _, _) = keys_paginated(table_ref, 0, 0, length(table_ref)); + let (keys, _, _) = self.keys_paginated(0, 0, self.length()); keys } @@ -210,7 +207,7 @@ module aptos_std::smart_table { /// returned bucket index and vector index value options are both none, which means that /// pagination is complete. For an example, see `test_keys()`. public fun keys_paginated( - table_ref: &SmartTable, + self: &SmartTable, starting_bucket_index: u64, starting_vector_index: u64, num_keys_to_get: u64, @@ -219,11 +216,11 @@ module aptos_std::smart_table { Option, Option, ) { - let num_buckets = table_ref.num_buckets; - let buckets_ref = &table_ref.buckets; + let num_buckets = self.num_buckets; + let buckets_ref = &self.buckets; assert!(starting_bucket_index < num_buckets, EINVALID_BUCKET_INDEX); - let bucket_ref = table_with_length::borrow(buckets_ref, starting_bucket_index); - let bucket_length = vector::length(bucket_ref); + let bucket_ref = buckets_ref.borrow(starting_bucket_index); + let bucket_length = bucket_ref.length(); assert!( // In the general case, starting vector index should never be equal to bucket length // because then iteration will attempt to borrow a vector element that is out of bounds. @@ -237,15 +234,15 @@ module aptos_std::smart_table { if (num_keys_to_get == 0) return (keys, option::some(starting_bucket_index), option::some(starting_vector_index)); for (bucket_index in starting_bucket_index..num_buckets) { - bucket_ref = table_with_length::borrow(buckets_ref, bucket_index); - bucket_length = vector::length(bucket_ref); + bucket_ref = buckets_ref.borrow(bucket_index); + bucket_length = bucket_ref.length(); for (vector_index in starting_vector_index..bucket_length) { - vector::push_back(&mut keys, vector::borrow(bucket_ref, vector_index).key); - num_keys_to_get = num_keys_to_get - 1; + keys.push_back(bucket_ref.borrow(vector_index).key); + num_keys_to_get -= 1; if (num_keys_to_get == 0) { - vector_index = vector_index + 1; + vector_index += 1; return if (vector_index == bucket_length) { - bucket_index = bucket_index + 1; + bucket_index += 1; if (bucket_index < num_buckets) { (keys, option::some(bucket_index), option::some(0)) } else { @@ -262,23 +259,23 @@ module aptos_std::smart_table { } /// Decide which is the next bucket to split and split it into two with the elements inside the bucket. - fun split_one_bucket(table: &mut SmartTable) { - let new_bucket_index = table.num_buckets; + fun split_one_bucket(self: &mut SmartTable) { + let new_bucket_index = self.num_buckets; // the next bucket to split is num_bucket without the most significant bit. 
- let to_split = new_bucket_index ^ (1 << table.level); - table.num_buckets = new_bucket_index + 1; + let to_split = new_bucket_index ^ (1 << self.level); + self.num_buckets = new_bucket_index + 1; // if the whole level is splitted once, bump the level. - if (to_split + 1 == 1 << table.level) { - table.level = table.level + 1; + if (to_split + 1 == 1 << self.level) { + self.level += 1; }; - let old_bucket = table_with_length::borrow_mut(&mut table.buckets, to_split); + let old_bucket = self.buckets.borrow_mut(to_split); // partition the bucket, [0..p) stays in old bucket, [p..len) goes to new bucket - let p = vector::partition(old_bucket, |e| { + let p = old_bucket.partition(|e| { let entry: &Entry = e; // Explicit type to satisfy compiler - bucket_index(table.level, table.num_buckets, entry.hash) != new_bucket_index + bucket_index(self.level, self.num_buckets, entry.hash) != new_bucket_index }); - let new_bucket = vector::trim_reverse(old_bucket, p); - table_with_length::add(&mut table.buckets, new_bucket_index, new_bucket); + let new_bucket = old_bucket.trim_reverse(p); + self.buckets.add(new_bucket_index, new_bucket); } /// Return the expected bucket index to find the hash. @@ -297,44 +294,40 @@ module aptos_std::smart_table { /// Acquire an immutable reference to the value which `key` maps to. /// Aborts if there is no entry for `key`. - public fun borrow(table: &SmartTable, key: K): &V { - let index = bucket_index(table.level, table.num_buckets, sip_hash_from_value(&key)); - let bucket = table_with_length::borrow(&table.buckets, index); - let i = 0; - let len = vector::length(bucket); - while (i < len) { - let entry = vector::borrow(bucket, i); + public fun borrow(self: &SmartTable, key: K): &V { + let index = bucket_index(self.level, self.num_buckets, sip_hash_from_value(&key)); + let bucket = self.buckets.borrow(index); + let len = bucket.length(); + for (i in 0..len) { + let entry = bucket.borrow(i); if (&entry.key == &key) { return &entry.value }; - i = i + 1; }; abort error::invalid_argument(ENOT_FOUND) } /// Acquire an immutable reference to the value which `key` maps to. /// Returns specified default value if there is no entry for `key`. - public fun borrow_with_default(table: &SmartTable, key: K, default: &V): &V { - if (!contains(table, copy key)) { + public fun borrow_with_default(self: &SmartTable, key: K, default: &V): &V { + if (!self.contains(copy key)) { default } else { - borrow(table, copy key) + self.borrow(copy key) } } /// Acquire a mutable reference to the value which `key` maps to. /// Aborts if there is no entry for `key`. - public fun borrow_mut(table: &mut SmartTable, key: K): &mut V { - let index = bucket_index(table.level, table.num_buckets, sip_hash_from_value(&key)); - let bucket = table_with_length::borrow_mut(&mut table.buckets, index); - let i = 0; - let len = vector::length(bucket); - while (i < len) { - let entry = vector::borrow_mut(bucket, i); + public fun borrow_mut(self: &mut SmartTable, key: K): &mut V { + let index = bucket_index(self.level, self.num_buckets, sip_hash_from_value(&key)); + let bucket = self.buckets.borrow_mut(index); + let len = bucket.length(); + for (i in 0..len) { + let entry = bucket.borrow_mut(i); if (&entry.key == &key) { return &mut entry.value }; - i = i + 1; }; abort error::invalid_argument(ENOT_FOUND) } @@ -342,159 +335,144 @@ module aptos_std::smart_table { /// Acquire a mutable reference to the value which `key` maps to. /// Insert the pair (`key`, `default`) first if there is no entry for `key`. 
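As a concrete illustration of the split order in `split_one_bucket` above (numbers assumed for the example): with `level == 1` and `num_buckets == 3`, the next split creates bucket 3 and selects `to_split = 3 ^ (1 << 1) = 1`, that is, the new bucket index with its most significant bit cleared. Since `to_split + 1 == 1 << level`, the level is bumped to 2, and bucket 1's entries are then repartitioned between buckets 1 and 3 via `bucket_index(2, 4, hash)`.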
public fun borrow_mut_with_default( - table: &mut SmartTable, + self: &mut SmartTable, key: K, default: V ): &mut V { - if (!contains(table, copy key)) { - add(table, copy key, default) + if (!self.contains(copy key)) { + self.add(copy key, default) }; - borrow_mut(table, key) + self.borrow_mut(key) } /// Returns true iff `table` contains an entry for `key`. - public fun contains(table: &SmartTable, key: K): bool { + public fun contains(self: &SmartTable, key: K): bool { let hash = sip_hash_from_value(&key); - let index = bucket_index(table.level, table.num_buckets, hash); - let bucket = table_with_length::borrow(&table.buckets, index); - vector::any(bucket, | entry | { - let e: &Entry = entry; + let index = bucket_index(self.level, self.num_buckets, hash); + let bucket = self.buckets.borrow(index); + bucket.any(| e | { e.hash == hash && &e.key == &key }) } /// Remove from `table` and return the value which `key` maps to. /// Aborts if there is no entry for `key`. - public fun remove(table: &mut SmartTable, key: K): V { - let index = bucket_index(table.level, table.num_buckets, sip_hash_from_value(&key)); - let bucket = table_with_length::borrow_mut(&mut table.buckets, index); - let i = 0; - let len = vector::length(bucket); - while (i < len) { - let entry = vector::borrow(bucket, i); + public fun remove(self: &mut SmartTable, key: K): V { + let index = bucket_index(self.level, self.num_buckets, sip_hash_from_value(&key)); + let bucket = self.buckets.borrow_mut(index); + let len = bucket.length(); + for (i in 0..len) { + let entry = bucket.borrow(i); if (&entry.key == &key) { - let Entry { hash: _, key: _, value } = vector::swap_remove(bucket, i); - table.size = table.size - 1; + let Entry { hash: _, key: _, value } = bucket.swap_remove(i); + self.size -= 1; return value }; - i = i + 1; }; abort error::invalid_argument(ENOT_FOUND) } /// Insert the pair (`key`, `value`) if there is no entry for `key`. /// update the value of the entry for `key` to `value` otherwise - public fun upsert(table: &mut SmartTable, key: K, value: V) { - if (!contains(table, copy key)) { - add(table, copy key, value) + public fun upsert(self: &mut SmartTable, key: K, value: V) { + if (!self.contains(copy key)) { + self.add(copy key, value) } else { - let ref = borrow_mut(table, key); + let ref = self.borrow_mut(key); *ref = value; }; } /// Returns the length of the table, i.e. the number of entries. - public fun length(table: &SmartTable): u64 { - table.size + public fun length(self: &SmartTable): u64 { + self.size } /// Return the load factor of the hashtable. - public fun load_factor(table: &SmartTable): u64 { - table.size * 100 / table.num_buckets / table.target_bucket_size + public fun load_factor(self: &SmartTable): u64 { + self.size * 100 / self.num_buckets / self.target_bucket_size } /// Update `split_load_threshold`. - public fun update_split_load_threshold(table: &mut SmartTable, split_load_threshold: u8) { + public fun update_split_load_threshold(self: &mut SmartTable, split_load_threshold: u8) { assert!( split_load_threshold <= 100 && split_load_threshold > 0, error::invalid_argument(EINVALID_LOAD_THRESHOLD_PERCENT) ); - table.split_load_threshold = split_load_threshold; + self.split_load_threshold = split_load_threshold; } /// Update `target_bucket_size`. 
- public fun update_target_bucket_size(table: &mut SmartTable, target_bucket_size: u64) { + public fun update_target_bucket_size(self: &mut SmartTable, target_bucket_size: u64) { assert!(target_bucket_size > 0, error::invalid_argument(EINVALID_TARGET_BUCKET_SIZE)); - table.target_bucket_size = target_bucket_size; + self.target_bucket_size = target_bucket_size; } /// Apply the function to a reference of each key-value pair in the table. - public inline fun for_each_ref(table: &SmartTable, f: |&K, &V|) { - let i = 0; - while (i < aptos_std::smart_table::num_buckets(table)) { - vector::for_each_ref( - aptos_std::table_with_length::borrow(aptos_std::smart_table::borrow_buckets(table), i), - |elem| { - let (key, value) = aptos_std::smart_table::borrow_kv(elem); - f(key, value) - } - ); - i = i + 1; + public inline fun for_each_ref(self: &SmartTable, f: |&K, &V|) { + for (i in 0..self.num_buckets()) { + self.borrow_buckets().borrow(i).for_each_ref(|elem| { + let (key, value) = elem.borrow_kv(); + f(key, value) + }); } } /// Apply the function to a mutable reference of each key-value pair in the table. - public inline fun for_each_mut(table: &mut SmartTable, f: |&K, &mut V|) { - let i = 0; - while (i < aptos_std::smart_table::num_buckets(table)) { - vector::for_each_mut( - table_with_length::borrow_mut(aptos_std::smart_table::borrow_buckets_mut(table), i), - |elem| { - let (key, value) = aptos_std::smart_table::borrow_kv_mut(elem); - f(key, value) - } - ); - i = i + 1; + public inline fun for_each_mut(self: &mut SmartTable, f: |&K, &mut V|) { + for (i in 0..self.num_buckets()) { + self.borrow_buckets_mut().borrow_mut(i).for_each_mut(|elem| { + let (key, value) = elem.borrow_kv_mut(); + f(key, value) + }); }; } /// Map the function over the references of key-value pairs in the table without modifying it. public inline fun map_ref( - table: &SmartTable, + self: &SmartTable, f: |&V1|V2 ): SmartTable { let new_table = new(); - for_each_ref(table, |key, value| add(&mut new_table, *key, f(value))); + self.for_each_ref(|key, value| new_table.add(*key, f(value))); new_table } /// Return true if any key-value pair in the table satisfies the predicate. public inline fun any( - table: &SmartTable, + self: &SmartTable, p: |&K, &V|bool ): bool { let found = false; - let i = 0; - while (i < aptos_std::smart_table::num_buckets(table)) { - found = vector::any(table_with_length::borrow(aptos_std::smart_table::borrow_buckets(table), i), |elem| { - let (key, value) = aptos_std::smart_table::borrow_kv(elem); + for (i in 0..self.num_buckets()) { + found = self.borrow_buckets().borrow(i).any(|elem| { + let (key, value) = elem.borrow_kv(); p(key, value) }); if (found) break; - i = i + 1; }; found } // Helper functions to circumvent the scope issue of inline functions. 
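The inline iteration helpers above close over locals of the caller; a short sketch in the style of the module's own tests (values arbitrary):

```move
#[test]
fun for_each_ref_sum_sketch() {
    let t = new<u64, u64>();
    t.add(1, 10);
    t.add(2, 20);
    let sum = 0;
    // for_each_ref walks every bucket entry and hands out key/value references.
    t.for_each_ref(|_k, v| {
        sum += *v;
    });
    assert!(sum == 30, 0);
    t.destroy();
}
```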
- public fun borrow_kv(e: &Entry): (&K, &V) { - (&e.key, &e.value) + public fun borrow_kv(self: &Entry): (&K, &V) { + (&self.key, &self.value) } - public fun borrow_kv_mut(e: &mut Entry): (&mut K, &mut V) { - (&mut e.key, &mut e.value) + public fun borrow_kv_mut(self: &mut Entry): (&mut K, &mut V) { + (&mut self.key, &mut self.value) } - public fun num_buckets(table: &SmartTable): u64 { - table.num_buckets + public fun num_buckets(self: &SmartTable): u64 { + self.num_buckets } - public fun borrow_buckets(table: &SmartTable): &TableWithLength>> { - &table.buckets + public fun borrow_buckets(self: &SmartTable): &TableWithLength>> { + &self.buckets } - public fun borrow_buckets_mut(table: &mut SmartTable): &mut TableWithLength>> { - &mut table.buckets + public fun borrow_buckets_mut(self: &mut SmartTable): &mut TableWithLength>> { + &mut self.buckets } @@ -503,24 +481,24 @@ module aptos_std::smart_table { let table = new(); let i = 0; while (i < 200) { - add(&mut table, i, i); - i = i + 1; + table.add(i, i); + i += 1; }; - assert!(length(&table) == 200, 0); + assert!(table.length() == 200, 0); i = 0; while (i < 200) { - *borrow_mut(&mut table, i) = i * 2; - assert!(*borrow(&table, i) == i * 2, 0); - i = i + 1; + *table.borrow_mut(i) = i * 2; + assert!(*table.borrow(i) == i * 2, 0); + i += 1; }; i = 0; assert!(table.num_buckets > 5, table.num_buckets); while (i < 200) { - assert!(contains(&table, i), 0); - assert!(remove(&mut table, i) == i * 2, 0); - i = i + 1; + assert!(table.contains(i), 0); + assert!(table.remove(i) == i * 2, 0); + i += 1; }; - destroy_empty(table); + table.destroy_empty(); } #[test] @@ -531,21 +509,21 @@ module aptos_std::smart_table { while (i <= 256) { assert!(table.num_buckets == i, 0); assert!(table.level == level, i); - add(&mut table, i, i); - i = i + 1; + table.add(i, i); + i += 1; if (i == 1 << (level + 1)) { - level = level + 1; + level += 1; }; }; let i = 1; while (i <= 256) { - assert!(*borrow(&table, i) == i, 0); - i = i + 1; + assert!(*table.borrow(i) == i, 0); + i += 1; }; assert!(table.num_buckets == 257, table.num_buckets); - assert!(load_factor(&table) == 99, 0); - assert!(length(&table) == 256, 0); - destroy(table); + assert!(table.load_factor() == 99, 0); + assert!(table.length() == 256, 0); + table.destroy(); } #[test] @@ -553,43 +531,43 @@ module aptos_std::smart_table { let table = new(); let i = 0; while (i < 200) { - add(&mut table, i, i); - i = i + 1; + table.add(i, i); + i += 1; }; - assert!(length(&table) == 200, 0); - update_target_bucket_size(&mut table, 10); - update_split_load_threshold(&mut table, 50); + assert!(table.length() == 200, 0); + table.update_target_bucket_size(10); + table.update_split_load_threshold(50); while (i < 400) { - add(&mut table, i, i); - i = i + 1; + table.add(i, i); + i += 1; }; - assert!(length(&table) == 400, 0); + assert!(table.length() == 400, 0); i = 0; while (i < 400) { - assert!(contains(&table, i), 0); - assert!(remove(&mut table, i) == i, 0); - i = i + 1; + assert!(table.contains(i), 0); + assert!(table.remove(i) == i, 0); + i += 1; }; - destroy_empty(table); + table.destroy_empty(); } #[test] public fun smart_table_add_all_test() { let table: SmartTable = new_with_config(1, 100, 2); - assert!(length(&table) == 0, 0); - add_all(&mut table, vector[1, 2, 3, 4, 5, 6, 7], vector[1, 2, 3, 4, 5, 6, 7]); - assert!(length(&table) == 7, 1); + assert!(table.length() == 0, 0); + table.add_all(vector[1, 2, 3, 4, 5, 6, 7], vector[1, 2, 3, 4, 5, 6, 7]); + assert!(table.length() == 7, 1); let i = 1; while (i < 8) { - 
assert!(*borrow(&table, i) == i, 0); - i = i + 1; + assert!(*table.borrow(i) == i, 0); + i += 1; }; - i = i - 1; + i -= 1; while (i > 0) { - remove(&mut table, i); - i = i - 1; + table.remove(i); + i -= 1; }; - destroy_empty(table); + table.destroy_empty(); } #[test] @@ -597,12 +575,12 @@ module aptos_std::smart_table { let table = new(); let i = 0; while (i < 200) { - add(&mut table, i, i); - i = i + 1; + table.add(i, i); + i += 1; }; - let map = to_simple_map(&table); - assert!(simple_map::length(&map) == 200, 0); - destroy(table); + let map = table.to_simple_map(); + assert!(map.length() == 200, 0); + table.destroy(); } #[test] @@ -610,17 +588,17 @@ module aptos_std::smart_table { let table = new(); let i = 0u64; while (i < 200) { - add(&mut table, i, i); - i = i + 1; + table.add(i, i); + i += 1; }; - clear(&mut table); + table.clear(); let i = 0; while (i < 200) { - add(&mut table, i, i); - i = i + 1; + table.add(i, i); + i += 1; }; assert!(table.size == 200, 0); - destroy(table); + table.destroy(); } #[test] @@ -628,73 +606,70 @@ module aptos_std::smart_table { let i = 0; let table = new(); let expected_keys = vector[]; - let keys = keys(&table); - assert!(vector::is_empty(&keys), 0); + let keys = table.keys(); + assert!(keys.is_empty(), 0); let starting_bucket_index = 0; let starting_vector_index = 0; - let (keys, starting_bucket_index_r, starting_vector_index_r) = keys_paginated( - &table, + let (keys, starting_bucket_index_r, starting_vector_index_r) = table.keys_paginated( starting_bucket_index, starting_vector_index, 0 ); assert!(starting_bucket_index_r == option::some(starting_bucket_index), 0); assert!(starting_vector_index_r == option::some(starting_vector_index), 0); - assert!(vector::is_empty(&keys), 0); + assert!(keys.is_empty(), 0); while (i < 100) { - add(&mut table, i, 0); - vector::push_back(&mut expected_keys, i); - i = i + 1; - }; - let keys = keys(&table); - assert!(vector::length(&keys) == vector::length(&expected_keys), 0); - vector::for_each_ref(&keys, |e_ref| { - assert!(vector::contains(&expected_keys, e_ref), 0); + table.add(i, 0); + expected_keys.push_back(i); + i += 1; + }; + let keys = table.keys(); + assert!(keys.length() == expected_keys.length(), 0); + keys.for_each_ref(|e_ref| { + assert!(expected_keys.contains(e_ref), 0); }); let keys = vector[]; let starting_bucket_index = 0; let starting_vector_index = 0; let returned_keys = vector[]; - vector::length(&returned_keys); // To eliminate erroneous compiler "unused" warning + returned_keys.length(); // To eliminate erroneous compiler "unused" warning loop { - (returned_keys, starting_bucket_index_r, starting_vector_index_r) = keys_paginated( - &table, + (returned_keys, starting_bucket_index_r, starting_vector_index_r) = table.keys_paginated( starting_bucket_index, starting_vector_index, 15 ); - vector::append(&mut keys, returned_keys); + keys.append(returned_keys); if ( starting_bucket_index_r == option::none() || starting_vector_index_r == option::none() ) break; - starting_bucket_index = option::destroy_some(starting_bucket_index_r); - starting_vector_index = option::destroy_some(starting_vector_index_r); + starting_bucket_index = starting_bucket_index_r.destroy_some(); + starting_vector_index = starting_vector_index_r.destroy_some(); }; - assert!(vector::length(&keys) == vector::length(&expected_keys), 0); - vector::for_each_ref(&keys, |e_ref| { - assert!(vector::contains(&expected_keys, e_ref), 0); + assert!(keys.length() == expected_keys.length(), 0); + keys.for_each_ref(|e_ref| { + 
assert!(expected_keys.contains(e_ref), 0); }); - destroy(table); + table.destroy(); table = new(); - add(&mut table, 1, 0); - add(&mut table, 2, 0); - (keys, starting_bucket_index_r, starting_vector_index_r) = keys_paginated(&table, 0, 0, 1); - (returned_keys, starting_bucket_index_r, starting_vector_index_r) = keys_paginated( - &table, - option::destroy_some(starting_bucket_index_r), - option::destroy_some(starting_vector_index_r), - 1, + table.add(1, 0); + table.add(2, 0); + (keys, starting_bucket_index_r, starting_vector_index_r) = table.keys_paginated(0, 0, 1); + (returned_keys, starting_bucket_index_r, starting_vector_index_r) = table.keys_paginated( + starting_bucket_index_r.destroy_some(), + starting_vector_index_r.destroy_some(), + 1 ); - vector::append(&mut keys, returned_keys); + keys.append(returned_keys); assert!(keys == vector[1, 2] || keys == vector[2, 1], 0); assert!(starting_bucket_index_r == option::none(), 0); assert!(starting_vector_index_r == option::none(), 0); - (keys, starting_bucket_index_r, starting_vector_index_r) = keys_paginated(&table, 0, 0, 0); + (keys, starting_bucket_index_r, starting_vector_index_r) = table.keys_paginated(0, 0, 0); assert!(keys == vector[], 0); assert!(starting_bucket_index_r == option::some(0), 0); assert!(starting_vector_index_r == option::some(0), 0); - destroy(table); + table.destroy(); } #[test] @@ -702,68 +677,57 @@ module aptos_std::smart_table { let table = new(); let expected_keys = vector[]; for (i in 0..100) { - add(&mut table, i, 0); - vector::push_back(&mut expected_keys, i); + table.add(i, 0); + expected_keys.push_back(i); }; let (keys, starting_bucket_index_r, starting_vector_index_r) = - keys_paginated(&table, 0, 0, 5); // Both indices 0. - assert!(vector::length(&keys) == 5, 0); - vector::for_each_ref(&keys, |e_ref| { - assert!(vector::contains(&expected_keys, e_ref), 0); + table.keys_paginated(0, 0, 5); // Both indices 0. + assert!(keys.length() == 5, 0); + keys.for_each_ref(|e_ref| { + assert!(expected_keys.contains(e_ref), 0); }); - let starting_bucket_index = option::destroy_some(starting_bucket_index_r); - let starting_vector_index = option::destroy_some(starting_vector_index_r); - (keys, starting_bucket_index_r, starting_vector_index_r) = keys_paginated( - &table, + let starting_bucket_index = starting_bucket_index_r.destroy_some(); + let starting_vector_index = starting_vector_index_r.destroy_some(); + (keys, starting_bucket_index_r, starting_vector_index_r) = table.keys_paginated( starting_bucket_index, starting_vector_index, - 0, // Number of keys 0. + 0 ); assert!(keys == vector[], 0); assert!(starting_bucket_index_r == option::some(starting_bucket_index), 0); assert!(starting_vector_index_r == option::some(starting_vector_index), 0); - (keys, starting_bucket_index_r, starting_vector_index_r) = keys_paginated( - &table, - starting_bucket_index, - 0, // Vector index 0. 
- 50, - ); - assert!(vector::length(&keys) == 50, 0); - vector::for_each_ref(&keys, |e_ref| { - assert!(vector::contains(&expected_keys, e_ref), 0); + (keys, starting_bucket_index_r, starting_vector_index_r) = table.keys_paginated(starting_bucket_index, 0, 50); + assert!(keys.length() == 50, 0); + keys.for_each_ref(|e_ref| { + assert!(expected_keys.contains(e_ref), 0); }); - let starting_bucket_index = option::destroy_some(starting_bucket_index_r); + let starting_bucket_index = starting_bucket_index_r.destroy_some(); assert!(starting_bucket_index > 0, 0); - assert!(option::is_some(&starting_vector_index_r), 0); - (keys, starting_bucket_index_r, starting_vector_index_r) = keys_paginated( - &table, - 0, // Bucket index 0. - 1, - 50, - ); - assert!(vector::length(&keys) == 50, 0); - vector::for_each_ref(&keys, |e_ref| { - assert!(vector::contains(&expected_keys, e_ref), 0); + assert!(starting_vector_index_r.is_some(), 0); + (keys, starting_bucket_index_r, starting_vector_index_r) = table.keys_paginated(0, 1, 50); + assert!(keys.length() == 50, 0); + keys.for_each_ref(|e_ref| { + assert!(expected_keys.contains(e_ref), 0); }); - assert!(option::is_some(&starting_bucket_index_r), 0); - assert!(option::is_some(&starting_vector_index_r), 0); - destroy(table); + assert!(starting_bucket_index_r.is_some(), 0); + assert!(starting_vector_index_r.is_some(), 0); + table.destroy(); } #[test, expected_failure(abort_code = EINVALID_BUCKET_INDEX)] fun test_keys_invalid_bucket_index() { - let table = new(); - add(&mut table, 1, 0); + let table = new(); + table.add(1, 0); let num_buckets = table.num_buckets; - keys_paginated(&table, num_buckets + 1, 0, 1); - destroy(table); + table.keys_paginated(num_buckets + 1, 0, 1); + table.destroy(); } #[test, expected_failure(abort_code = EINVALID_VECTOR_INDEX)] fun test_keys_invalid_vector_index() { let table = new(); - add(&mut table, 1, 0); - keys_paginated(&table, 0, 1, 1); - destroy(table); + table.add(1, 0); + table.keys_paginated(0, 1, 1); + table.destroy(); } } diff --git a/aptos-move/framework/aptos-stdlib/sources/data_structures/smart_table.spec.move b/aptos-move/framework/aptos-stdlib/sources/data_structures/smart_table.spec.move index 113bb4f06cabf..b872e1234dd6f 100644 --- a/aptos-move/framework/aptos-stdlib/sources/data_structures/smart_table.spec.move +++ b/aptos-move/framework/aptos-stdlib/sources/data_structures/smart_table.spec.move @@ -11,6 +11,7 @@ spec aptos_std::smart_table { map_borrow = borrow, map_borrow_mut = borrow_mut, map_borrow_mut_with_default = borrow_mut_with_default, + map_borrow_with_default = borrow_with_default, map_spec_get = spec_get, map_spec_set = spec_set, map_spec_del = spec_remove, @@ -22,15 +23,17 @@ spec aptos_std::smart_table { pragma verify = false; } - spec destroy(table: SmartTable) { + spec destroy(self: SmartTable) { pragma verify = false; + pragma opaque; } - spec clear(table: &mut SmartTable) { + spec clear(self: &mut SmartTable) { pragma verify = false; + pragma opaque; } - spec split_one_bucket(table: &mut SmartTable) { + spec split_one_bucket(self: &mut SmartTable) { pragma verify = false; } @@ -38,26 +41,26 @@ spec aptos_std::smart_table { pragma verify = false; } - spec borrow_with_default(table: &SmartTable, key: K, default: &V): &V { + spec borrow_with_default(self: &SmartTable, key: K, default: &V): &V { pragma verify = false; } - spec load_factor(table: &SmartTable): u64 { + spec load_factor(self: &SmartTable): u64 { pragma verify = false; } spec to_simple_map( - table: &SmartTable, + self: &SmartTable, ): 
SimpleMap { pragma verify = false; } - spec keys(table_ref: &SmartTable): vector { + spec keys(self: &SmartTable): vector { pragma verify = false; } spec keys_paginated( - table_ref: &SmartTable, + self: &SmartTable, starting_bucket_index: u64, starting_vector_index: u64, num_keys_to_get: u64, @@ -69,35 +72,35 @@ spec aptos_std::smart_table { pragma verify = false; } - spec add_all(table: &mut SmartTable, keys: vector, values: vector) { + spec add_all(self: &mut SmartTable, keys: vector, values: vector) { pragma verify = false; } - spec update_split_load_threshold(table: &mut SmartTable, split_load_threshold: u8) { + spec update_split_load_threshold(self: &mut SmartTable, split_load_threshold: u8) { pragma verify = false; } - spec update_target_bucket_size(table: &mut SmartTable, target_bucket_size: u64) { + spec update_target_bucket_size(self: &mut SmartTable, target_bucket_size: u64) { pragma verify = false; } - spec borrow_kv(e: &Entry): (&K, &V) { + spec borrow_kv(self: &Entry): (&K, &V) { pragma verify = false; } - spec borrow_kv_mut(e: &mut Entry): (&mut K, &mut V) { + spec borrow_kv_mut(self: &mut Entry): (&mut K, &mut V) { pragma verify = false; } - spec num_buckets(table: &SmartTable): u64 { + spec num_buckets(self: &SmartTable): u64 { pragma verify = false; } - spec borrow_buckets(table: &SmartTable): &TableWithLength>> { + spec borrow_buckets(self: &SmartTable): &TableWithLength>> { pragma verify = false; } - spec borrow_buckets_mut(table: &mut SmartTable): &mut TableWithLength>> { + spec borrow_buckets_mut(self: &mut SmartTable): &mut TableWithLength>> { pragma verify = false; } diff --git a/aptos-move/framework/aptos-stdlib/sources/data_structures/smart_vector.move b/aptos-move/framework/aptos-stdlib/sources/data_structures/smart_vector.move index 10f3c816b2fa7..ffbafe29eb930 100644 --- a/aptos-move/framework/aptos-stdlib/sources/data_structures/smart_vector.move +++ b/aptos-move/framework/aptos-stdlib/sources/data_structures/smart_vector.move @@ -1,6 +1,5 @@ module aptos_std::smart_vector { use std::error; - use std::vector; use aptos_std::big_vector::{Self, BigVector}; use aptos_std::math64::max; use aptos_std::type_info::size_of_val; @@ -63,490 +62,476 @@ module aptos_std::smart_vector { /// Create a vector of length 1 containing the passed in T. public fun singleton(element: T): SmartVector { let v = empty(); - push_back(&mut v, element); + v.push_back(element); v } - /// Destroy the vector `v`. - /// Aborts if `v` is not empty. - public fun destroy_empty(v: SmartVector) { - assert!(is_empty(&v), error::invalid_argument(EVECTOR_NOT_EMPTY)); - let SmartVector { inline_vec, big_vec, inline_capacity: _, bucket_size: _ } = v; - vector::destroy_empty(inline_vec); - option::destroy_none(big_vec); + /// Destroy the vector `self`. + /// Aborts if `self` is not empty. + public fun destroy_empty(self: SmartVector) { + assert!(self.is_empty(), error::invalid_argument(EVECTOR_NOT_EMPTY)); + let SmartVector { inline_vec, big_vec, inline_capacity: _, bucket_size: _ } = self; + inline_vec.destroy_empty(); + big_vec.destroy_none(); } /// Destroy a vector completely when T has `drop`. - public fun destroy(v: SmartVector) { - clear(&mut v); - destroy_empty(v); + public fun destroy(self: SmartVector) { + self.clear(); + self.destroy_empty(); } /// Clear a vector completely when T has `drop`. 
- public fun clear(v: &mut SmartVector) { - v.inline_vec = vector[]; - if (option::is_some(&v.big_vec)) { - big_vector::destroy(option::extract(&mut v.big_vec)); + public fun clear(self: &mut SmartVector) { + self.inline_vec = vector[]; + if (self.big_vec.is_some()) { + self.big_vec.extract().destroy(); } } - /// Acquire an immutable reference to the `i`th T of the vector `v`. + /// Acquire an immutable reference to the `i`th T of the vector `self`. /// Aborts if `i` is out of bounds. - public fun borrow(v: &SmartVector, i: u64): &T { - assert!(i < length(v), error::invalid_argument(EINDEX_OUT_OF_BOUNDS)); - let inline_len = vector::length(&v.inline_vec); + public fun borrow(self: &SmartVector, i: u64): &T { + assert!(i < self.length(), error::invalid_argument(EINDEX_OUT_OF_BOUNDS)); + let inline_len = self.inline_vec.length(); if (i < inline_len) { - vector::borrow(&v.inline_vec, i) + self.inline_vec.borrow(i) } else { - big_vector::borrow(option::borrow(&v.big_vec), i - inline_len) + self.big_vec.borrow().borrow(i - inline_len) } } - /// Return a mutable reference to the `i`th T in the vector `v`. + /// Return a mutable reference to the `i`th T in the vector `self`. /// Aborts if `i` is out of bounds. - public fun borrow_mut(v: &mut SmartVector, i: u64): &mut T { - assert!(i < length(v), error::invalid_argument(EINDEX_OUT_OF_BOUNDS)); - let inline_len = vector::length(&v.inline_vec); + public fun borrow_mut(self: &mut SmartVector, i: u64): &mut T { + assert!(i < self.length(), error::invalid_argument(EINDEX_OUT_OF_BOUNDS)); + let inline_len = self.inline_vec.length(); if (i < inline_len) { - vector::borrow_mut(&mut v.inline_vec, i) + self.inline_vec.borrow_mut(i) } else { - big_vector::borrow_mut(option::borrow_mut(&mut v.big_vec), i - inline_len) + self.big_vec.borrow_mut().borrow_mut(i - inline_len) } } - /// Empty and destroy the other vector, and push each of the Ts in the other vector onto the lhs vector in the + /// Empty and destroy the other vector, and push each of the Ts in the other vector onto the self vector in the /// same order as they occurred in other. /// Disclaimer: This function may be costly. Use it at your own discretion. - public fun append(lhs: &mut SmartVector, other: SmartVector) { - let other_len = length(&other); + public fun append(self: &mut SmartVector, other: SmartVector) { + let other_len = other.length(); let half_other_len = other_len / 2; let i = 0; while (i < half_other_len) { - push_back(lhs, swap_remove(&mut other, i)); - i = i + 1; + self.push_back(other.swap_remove(i)); + i += 1; }; while (i < other_len) { - push_back(lhs, pop_back(&mut other)); - i = i + 1; + self.push_back(other.pop_back()); + i += 1; }; - destroy_empty(other); + other.destroy_empty(); } /// Add multiple values to the vector at once. - public fun add_all(v: &mut SmartVector, vals: vector) { - vector::for_each(vals, |val| { push_back(v, val); }) + public fun add_all(self: &mut SmartVector, vals: vector) { + vals.for_each(|val| { self.push_back(val); }) } /// Convert a smart vector to a native vector, which is supposed to be called mostly by view functions to get an /// atomic view of the whole vector. /// Disclaimer: This function may be costly as the smart vector may be huge in size. Use it at your own discretion. 
- public fun to_vector(v: &SmartVector): vector { - let res = v.inline_vec; - if (option::is_some(&v.big_vec)) { - let big_vec = option::borrow(&v.big_vec); - vector::append(&mut res, big_vector::to_vector(big_vec)); + public fun to_vector(self: &SmartVector): vector { + let res = self.inline_vec; + if (self.big_vec.is_some()) { + let big_vec = self.big_vec.borrow(); + res.append(big_vec.to_vector()); }; res } - /// Add T `val` to the end of the vector `v`. It grows the buckets when the current buckets are full. + /// Add T `val` to the end of the vector `self`. It grows the buckets when the current buckets are full. /// This operation will cost more gas when it adds new bucket. - public fun push_back(v: &mut SmartVector, val: T) { - let len = length(v); - let inline_len = vector::length(&v.inline_vec); + public fun push_back(self: &mut SmartVector, val: T) { + let len = self.length(); + let inline_len = self.inline_vec.length(); if (len == inline_len) { - let bucket_size = if (option::is_some(&v.inline_capacity)) { - if (len < *option::borrow(&v.inline_capacity)) { - vector::push_back(&mut v.inline_vec, val); + let bucket_size = if (self.inline_capacity.is_some()) { + if (len < *self.inline_capacity.borrow()) { + self.inline_vec.push_back(val); return }; - *option::borrow(&v.bucket_size) + *self.bucket_size.borrow() } else { let val_size = size_of_val(&val); if (val_size * (inline_len + 1) < 150 /* magic number */) { - vector::push_back(&mut v.inline_vec, val); + self.inline_vec.push_back(val); return }; - let estimated_avg_size = max((size_of_val(&v.inline_vec) + val_size) / (inline_len + 1), 1); + let estimated_avg_size = max((size_of_val(&self.inline_vec) + val_size) / (inline_len + 1), 1); max(1024 /* free_write_quota */ / estimated_avg_size, 1) }; - option::fill(&mut v.big_vec, big_vector::empty(bucket_size)); + self.big_vec.fill(big_vector::empty(bucket_size)); }; - big_vector::push_back(option::borrow_mut(&mut v.big_vec), val); + self.big_vec.borrow_mut().push_back(val); } - /// Pop an T from the end of vector `v`. It does shrink the buckets if they're empty. - /// Aborts if `v` is empty. - public fun pop_back(v: &mut SmartVector): T { - assert!(!is_empty(v), error::invalid_state(EVECTOR_EMPTY)); - let big_vec_wrapper = &mut v.big_vec; - if (option::is_some(big_vec_wrapper)) { - let big_vec = option::extract(big_vec_wrapper); - let val = big_vector::pop_back(&mut big_vec); - if (big_vector::is_empty(&big_vec)) { - big_vector::destroy_empty(big_vec) + /// Pop an T from the end of vector `self`. It does shrink the buckets if they're empty. + /// Aborts if `self` is empty. + public fun pop_back(self: &mut SmartVector): T { + assert!(!self.is_empty(), error::invalid_state(EVECTOR_EMPTY)); + let big_vec_wrapper = &mut self.big_vec; + if (big_vec_wrapper.is_some()) { + let big_vec = big_vec_wrapper.extract(); + let val = big_vec.pop_back(); + if (big_vec.is_empty()) { + big_vec.destroy_empty() } else { - option::fill(big_vec_wrapper, big_vec); + big_vec_wrapper.fill(big_vec); }; val } else { - vector::pop_back(&mut v.inline_vec) + self.inline_vec.pop_back() } } - /// Remove the T at index i in the vector v and return the owned value that was previously stored at i in v. + /// Remove the T at index i in the vector self and return the owned value that was previously stored at i in self. /// All Ts occurring at indices greater than i will be shifted down by 1. Will abort if i is out of bounds. /// Disclaimer: This function may be costly. Use it at your own discretion. 
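// Illustrative arithmetic for `push_back` above (editorial sketch, not part of this change;
// the 40-byte element size is an assumed example value): with no `inline_capacity` configured
// and `size_of_val(&val) == 40`, pushes stay in `inline_vec` while 40 * (inline_len + 1) < 150,
// i.e. for the first three elements. The fourth push then computes
//   estimated_avg_size = max((size_of_val(&self.inline_vec) + 40) / 4, 1)
//   bucket_size        = max(1024 / estimated_avg_size, 1)
// fills `big_vec` with an empty big vector of that bucket size, and appends there from then on.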
- public fun remove(v: &mut SmartVector, i: u64): T { - let len = length(v); + public fun remove(self: &mut SmartVector, i: u64): T { + let len = self.length(); assert!(i < len, error::invalid_argument(EINDEX_OUT_OF_BOUNDS)); - let inline_len = vector::length(&v.inline_vec); + let inline_len = self.inline_vec.length(); if (i < inline_len) { - vector::remove(&mut v.inline_vec, i) + self.inline_vec.remove(i) } else { - let big_vec_wrapper = &mut v.big_vec; - let big_vec = option::extract(big_vec_wrapper); - let val = big_vector::remove(&mut big_vec, i - inline_len); - if (big_vector::is_empty(&big_vec)) { - big_vector::destroy_empty(big_vec) + let big_vec_wrapper = &mut self.big_vec; + let big_vec = big_vec_wrapper.extract(); + let val = big_vec.remove(i - inline_len); + if (big_vec.is_empty()) { + big_vec.destroy_empty() } else { - option::fill(big_vec_wrapper, big_vec); + big_vec_wrapper.fill(big_vec); }; val } } - /// Swap the `i`th T of the vector `v` with the last T and then pop the vector. + /// Swap the `i`th T of the vector `self` with the last T and then pop the vector. /// This is O(1), but does not preserve ordering of Ts in the vector. /// Aborts if `i` is out of bounds. - public fun swap_remove(v: &mut SmartVector, i: u64): T { - let len = length(v); + public fun swap_remove(self: &mut SmartVector, i: u64): T { + let len = self.length(); assert!(i < len, error::invalid_argument(EINDEX_OUT_OF_BOUNDS)); - let inline_len = vector::length(&v.inline_vec); - let big_vec_wrapper = &mut v.big_vec; - let inline_vec = &mut v.inline_vec; + let inline_len = self.inline_vec.length(); + let big_vec_wrapper = &mut self.big_vec; + let inline_vec = &mut self.inline_vec; if (i >= inline_len) { - let big_vec = option::extract(big_vec_wrapper); - let val = big_vector::swap_remove(&mut big_vec, i - inline_len); - if (big_vector::is_empty(&big_vec)) { - big_vector::destroy_empty(big_vec) + let big_vec = big_vec_wrapper.extract(); + let val = big_vec.swap_remove(i - inline_len); + if (big_vec.is_empty()) { + big_vec.destroy_empty() } else { - option::fill(big_vec_wrapper, big_vec); + big_vec_wrapper.fill(big_vec); }; val } else { if (inline_len < len) { - let big_vec = option::extract(big_vec_wrapper); - let last_from_big_vec = big_vector::pop_back(&mut big_vec); - if (big_vector::is_empty(&big_vec)) { - big_vector::destroy_empty(big_vec) + let big_vec = big_vec_wrapper.extract(); + let last_from_big_vec = big_vec.pop_back(); + if (big_vec.is_empty()) { + big_vec.destroy_empty() } else { - option::fill(big_vec_wrapper, big_vec); + big_vec_wrapper.fill(big_vec); }; - vector::push_back(inline_vec, last_from_big_vec); + inline_vec.push_back(last_from_big_vec); }; - vector::swap_remove(inline_vec, i) + inline_vec.swap_remove(i) } } /// Swap the Ts at the i'th and j'th indices in the vector v. Will abort if either of i or j are out of bounds - /// for v. - public fun swap(v: &mut SmartVector, i: u64, j: u64) { + /// for self. 
+ public fun swap(self: &mut SmartVector, i: u64, j: u64) { if (i > j) { - return swap(v, j, i) + return self.swap(j, i) }; - let len = length(v); + let len = self.length(); assert!(j < len, error::invalid_argument(EINDEX_OUT_OF_BOUNDS)); - let inline_len = vector::length(&v.inline_vec); + let inline_len = self.inline_vec.length(); if (i >= inline_len) { - big_vector::swap(option::borrow_mut(&mut v.big_vec), i - inline_len, j - inline_len); + self.big_vec.borrow_mut().swap(i - inline_len, j - inline_len); } else if (j < inline_len) { - vector::swap(&mut v.inline_vec, i, j); + self.inline_vec.swap(i, j); } else { - let big_vec = option::borrow_mut(&mut v.big_vec); - let inline_vec = &mut v.inline_vec; - let element_i = vector::swap_remove(inline_vec, i); - let element_j = big_vector::swap_remove(big_vec, j - inline_len); - vector::push_back(inline_vec, element_j); - vector::swap(inline_vec, i, inline_len - 1); - big_vector::push_back(big_vec, element_i); - big_vector::swap(big_vec, j - inline_len, len - inline_len - 1); + let big_vec = self.big_vec.borrow_mut(); + let inline_vec = &mut self.inline_vec; + let element_i = inline_vec.swap_remove(i); + let element_j = big_vec.swap_remove(j - inline_len); + inline_vec.push_back(element_j); + inline_vec.swap(i, inline_len - 1); + big_vec.push_back(element_i); + big_vec.swap(j - inline_len, len - inline_len - 1); } } - /// Reverse the order of the Ts in the vector v in-place. + /// Reverse the order of the Ts in the vector self in-place. /// Disclaimer: This function may be costly. Use it at your own discretion. - public fun reverse(v: &mut SmartVector) { - let inline_len = vector::length(&v.inline_vec); - let i = 0; + public fun reverse(self: &mut SmartVector) { + let inline_len = self.inline_vec.length(); let new_inline_vec = vector[]; // Push the last `inline_len` Ts into a temp vector. - while (i < inline_len) { - vector::push_back(&mut new_inline_vec, pop_back(v)); - i = i + 1; + for (i in 0..inline_len) { + new_inline_vec.push_back(self.pop_back()); }; - vector::reverse(&mut new_inline_vec); + new_inline_vec.reverse(); // Reverse the big_vector left if exists. - if (option::is_some(&v.big_vec)) { - big_vector::reverse(option::borrow_mut(&mut v.big_vec)); + if (self.big_vec.is_some()) { + self.big_vec.borrow_mut().reverse(); }; // Mem::swap the two vectors. let temp_vec = vector[]; - while (!vector::is_empty(&mut v.inline_vec)) { - vector::push_back(&mut temp_vec, vector::pop_back(&mut v.inline_vec)); + while (!self.inline_vec.is_empty()) { + temp_vec.push_back(self.inline_vec.pop_back()); }; - vector::reverse(&mut temp_vec); - while (!vector::is_empty(&mut new_inline_vec)) { - vector::push_back(&mut v.inline_vec, vector::pop_back(&mut new_inline_vec)); + temp_vec.reverse(); + while (!new_inline_vec.is_empty()) { + self.inline_vec.push_back(new_inline_vec.pop_back()); }; - vector::destroy_empty(new_inline_vec); + new_inline_vec.destroy_empty(); // Push the rest Ts originally left in inline_vector back to the end of the smart vector. - while (!vector::is_empty(&mut temp_vec)) { - push_back(v, vector::pop_back(&mut temp_vec)); + while (!temp_vec.is_empty()) { + self.push_back(temp_vec.pop_back()); }; - vector::destroy_empty(temp_vec); + temp_vec.destroy_empty(); } - /// Return `(true, i)` if `val` is in the vector `v` at index `i`. + /// Return `(true, i)` if `val` is in the vector `self` at index `i`. /// Otherwise, returns `(false, 0)`. /// Disclaimer: This function may be costly. Use it at your own discretion. 
- public fun index_of(v: &SmartVector, val: &T): (bool, u64) { - let (found, i) = vector::index_of(&v.inline_vec, val); + public fun index_of(self: &SmartVector, val: &T): (bool, u64) { + let (found, i) = self.inline_vec.index_of(val); if (found) { (true, i) - } else if (option::is_some(&v.big_vec)) { - let (found, i) = big_vector::index_of(option::borrow(&v.big_vec), val); - (found, i + vector::length(&v.inline_vec)) + } else if (self.big_vec.is_some()) { + let (found, i) = self.big_vec.borrow().index_of(val); + (found, i + self.inline_vec.length()) } else { (false, 0) } } - /// Return true if `val` is in the vector `v`. + /// Return true if `val` is in the vector `self`. /// Disclaimer: This function may be costly. Use it at your own discretion. - public fun contains(v: &SmartVector, val: &T): bool { - if (is_empty(v)) return false; - let (exist, _) = index_of(v, val); + public fun contains(self: &SmartVector, val: &T): bool { + if (self.is_empty()) return false; + let (exist, _) = self.index_of(val); exist } /// Return the length of the vector. - public fun length(v: &SmartVector): u64 { - vector::length(&v.inline_vec) + if (option::is_none(&v.big_vec)) { + public fun length(self: &SmartVector): u64 { + self.inline_vec.length() + if (self.big_vec.is_none()) { 0 } else { - big_vector::length(option::borrow(&v.big_vec)) + self.big_vec.borrow().length() } } - /// Return `true` if the vector `v` has no Ts and `false` otherwise. - public fun is_empty(v: &SmartVector): bool { - length(v) == 0 + /// Return `true` if the vector `self` has no Ts and `false` otherwise. + public fun is_empty(self: &SmartVector): bool { + self.length() == 0 } /// Apply the function to each T in the vector, consuming it. - public inline fun for_each(v: SmartVector, f: |T|) { - aptos_std::smart_vector::reverse(&mut v); // We need to reverse the vector to consume it efficiently - aptos_std::smart_vector::for_each_reverse(v, |e| f(e)); + public inline fun for_each(self: SmartVector, f: |T|) { + self.reverse(); // We need to reverse the vector to consume it efficiently + self.for_each_reverse(|e| f(e)); } /// Apply the function to each T in the vector, consuming it. - public inline fun for_each_reverse(v: SmartVector, f: |T|) { - let len = aptos_std::smart_vector::length(&v); + public inline fun for_each_reverse(self: SmartVector, f: |T|) { + let len = self.length(); while (len > 0) { - f(aptos_std::smart_vector::pop_back(&mut v)); - len = len - 1; + f(self.pop_back()); + len -= 1; }; - aptos_std::smart_vector::destroy_empty(v) + self.destroy_empty() } /// Apply the function to a reference of each T in the vector. - public inline fun for_each_ref(v: &SmartVector, f: |&T|) { - let i = 0; - let len = aptos_std::smart_vector::length(v); - while (i < len) { - f(aptos_std::smart_vector::borrow(v, i)); - i = i + 1 + public inline fun for_each_ref(self: &SmartVector, f: |&T|) { + let len = self.length(); + for (i in 0..len) { + f(self.borrow(i)); } } /// Apply the function to a mutable reference to each T in the vector. - public inline fun for_each_mut(v: &mut SmartVector, f: |&mut T|) { - let i = 0; - let len = aptos_std::smart_vector::length(v); - while (i < len) { - f(aptos_std::smart_vector::borrow_mut(v, i)); - i = i + 1 + public inline fun for_each_mut(self: &mut SmartVector, f: |&mut T|) { + let len = self.length(); + for (i in 0..len) { + f(self.borrow_mut(i)); } } /// Apply the function to a reference of each T in the vector with its index. 
- public inline fun enumerate_ref(v: &SmartVector, f: |u64, &T|) { - let i = 0; - let len = aptos_std::smart_vector::length(v); - while (i < len) { - f(i, aptos_std::smart_vector::borrow(v, i)); - i = i + 1; + public inline fun enumerate_ref(self: &SmartVector, f: |u64, &T|) { + let len = self.length(); + for (i in 0..len) { + f(i, self.borrow(i)); }; } /// Apply the function to a mutable reference of each T in the vector with its index. - public inline fun enumerate_mut(v: &mut SmartVector, f: |u64, &mut T|) { - let i = 0; - let len = length(v); - while (i < len) { - f(i, borrow_mut(v, i)); - i = i + 1; + public inline fun enumerate_mut(self: &mut SmartVector, f: |u64, &mut T|) { + let len = self.length(); + for (i in 0..len) { + f(i, self.borrow_mut(i)); }; } /// Fold the function over the Ts. For example, `fold(vector[1,2,3], 0, f)` will execute /// `f(f(f(0, 1), 2), 3)` public inline fun fold( - v: SmartVector, + self: SmartVector, init: Accumulator, f: |Accumulator, T|Accumulator ): Accumulator { let accu = init; - aptos_std::smart_vector::for_each(v, |elem| accu = f(accu, elem)); + self.for_each(|elem| accu = f(accu, elem)); accu } /// Fold right like fold above but working right to left. For example, `fold(vector[1,2,3], 0, f)` will execute /// `f(1, f(2, f(3, 0)))` public inline fun foldr( - v: SmartVector, + self: SmartVector, init: Accumulator, f: |T, Accumulator|Accumulator ): Accumulator { let accu = init; - aptos_std::smart_vector::for_each_reverse(v, |elem| accu = f(elem, accu)); + self.for_each_reverse(|elem| accu = f(elem, accu)); accu } /// Map the function over the references of the Ts of the vector, producing a new vector without modifying the /// original vector. public inline fun map_ref( - v: &SmartVector, + self: &SmartVector, f: |&T1|T2 ): SmartVector { let result = aptos_std::smart_vector::new(); - aptos_std::smart_vector::for_each_ref(v, |elem| aptos_std::smart_vector::push_back(&mut result, f(elem))); + self.for_each_ref(|elem| result.push_back(f(elem))); result } /// Map the function over the Ts of the vector, producing a new vector. public inline fun map( - v: SmartVector, + self: SmartVector, f: |T1|T2 ): SmartVector { let result = aptos_std::smart_vector::new(); - aptos_std::smart_vector::for_each(v, |elem| push_back(&mut result, f(elem))); + self.for_each(|elem| result.push_back(f(elem))); result } /// Filter the vector using the boolean function, removing all Ts for which `p(e)` is not true. public inline fun filter( - v: SmartVector, + self: SmartVector, p: |&T|bool ): SmartVector { let result = aptos_std::smart_vector::new(); - aptos_std::smart_vector::for_each(v, |elem| { - if (p(&elem)) aptos_std::smart_vector::push_back(&mut result, elem); + self.for_each(|elem| { + if (p(&elem)) result.push_back(elem); }); result } - public inline fun zip(v1: SmartVector, v2: SmartVector, f: |T1, T2|) { + public inline fun zip(self: SmartVector, v2: SmartVector, f: |T1, T2|) { // We need to reverse the vectors to consume it efficiently - aptos_std::smart_vector::reverse(&mut v1); - aptos_std::smart_vector::reverse(&mut v2); - aptos_std::smart_vector::zip_reverse(v1, v2, |e1, e2| f(e1, e2)); + self.reverse(); + v2.reverse(); + self.zip_reverse(v2, |e1, e2| f(e1, e2)); } /// Apply the function to each pair of elements in the two given vectors in the reverse order, consuming them. /// This errors out if the vectors are not of the same length. 
public inline fun zip_reverse( - v1: SmartVector, + self: SmartVector, v2: SmartVector, f: |T1, T2|, ) { - let len = aptos_std::smart_vector::length(&v1); + let len = self.length(); // We can't use the constant ESMART_VECTORS_LENGTH_MISMATCH here as all calling code would then need to define it // due to how inline functions work. - assert!(len == aptos_std::smart_vector::length(&v2), 0x20005); + assert!(len == v2.length(), 0x20005); while (len > 0) { - f(aptos_std::smart_vector::pop_back(&mut v1), aptos_std::smart_vector::pop_back(&mut v2)); - len = len - 1; + f(self.pop_back(), v2.pop_back()); + len -= 1; }; - aptos_std::smart_vector::destroy_empty(v1); - aptos_std::smart_vector::destroy_empty(v2); + self.destroy_empty(); + v2.destroy_empty(); } /// Apply the function to the references of each pair of elements in the two given vectors. /// This errors out if the vectors are not of the same length. public inline fun zip_ref( - v1: &SmartVector, + self: &SmartVector, v2: &SmartVector, f: |&T1, &T2|, ) { - let len = aptos_std::smart_vector::length(v1); + let len = self.length(); // We can't use the constant ESMART_VECTORS_LENGTH_MISMATCH here as all calling code would then need to define it // due to how inline functions work. - assert!(len == aptos_std::smart_vector::length(v2), 0x20005); - let i = 0; - while (i < len) { - f(aptos_std::smart_vector::borrow(v1, i), aptos_std::smart_vector::borrow(v2, i)); - i = i + 1 + assert!(len == v2.length(), 0x20005); + for (i in 0..len) { + f(self.borrow(i), v2.borrow(i)); } } /// Apply the function to mutable references to each pair of elements in the two given vectors. /// This errors out if the vectors are not of the same length. public inline fun zip_mut( - v1: &mut SmartVector, + self: &mut SmartVector, v2: &mut SmartVector, f: |&mut T1, &mut T2|, ) { - let i = 0; - let len = aptos_std::smart_vector::length(v1); + let len = self.length(); // We can't use the constant ESMART_VECTORS_LENGTH_MISMATCH here as all calling code would then need to define it // due to how inline functions work. - assert!(len == aptos_std::smart_vector::length(v2), 0x20005); - while (i < len) { - f(aptos_std::smart_vector::borrow_mut(v1, i), aptos_std::smart_vector::borrow_mut(v2, i)); - i = i + 1 + assert!(len == v2.length(), 0x20005); + for (i in 0..len) { + f(self.borrow_mut(i), v2.borrow_mut(i)); } } /// Map the function over the element pairs of the two vectors, producing a new vector. public inline fun zip_map( - v1: SmartVector, + self: SmartVector, v2: SmartVector, f: |T1, T2|NewT ): SmartVector { // We can't use the constant ESMART_VECTORS_LENGTH_MISMATCH here as all calling code would then need to define it // due to how inline functions work. - assert!(aptos_std::smart_vector::length(&v1) == aptos_std::smart_vector::length(&v2), 0x20005); + assert!(self.length() == v2.length(), 0x20005); let result = aptos_std::smart_vector::new(); - aptos_std::smart_vector::zip(v1, v2, |e1, e2| push_back(&mut result, f(e1, e2))); + self.zip(v2, |e1, e2| result.push_back(f(e1, e2))); result } /// Map the function over the references of the element pairs of two vectors, producing a new vector from the return /// values without modifying the original vectors. public inline fun zip_map_ref( - v1: &SmartVector, + self: &SmartVector, v2: &SmartVector, f: |&T1, &T2|NewT ): SmartVector { // We can't use the constant ESMART_VECTORS_LENGTH_MISMATCH here as all calling code would then need to define it // due to how inline functions work. 
- assert!(aptos_std::smart_vector::length(v1) == aptos_std::smart_vector::length(v2), 0x20005); + assert!(self.length() == v2.length(), 0x20005); let result = aptos_std::smart_vector::new(); - aptos_std::smart_vector::zip_ref(v1, v2, |e1, e2| push_back(&mut result, f(e1, e2))); + self.zip_ref(v2, |e1, e2| result.push_back(f(e1, e2))); result } @@ -555,37 +540,37 @@ module aptos_std::smart_vector { let v = empty(); let i = 0; while (i < 100) { - push_back(&mut v, i); - i = i + 1; + v.push_back(i); + i += 1; }; let j = 0; while (j < 100) { - let val = borrow(&v, j); + let val = v.borrow(j); assert!(*val == j, 0); - j = j + 1; + j += 1; }; while (i > 0) { - i = i - 1; - let (exist, index) = index_of(&v, &i); - let j = pop_back(&mut v); + i -= 1; + let (exist, index) = v.index_of(&i); + let j = v.pop_back(); assert!(exist, 0); assert!(index == i, 0); assert!(j == i, 0); }; while (i < 100) { - push_back(&mut v, i); - i = i + 1; + v.push_back(i); + i += 1; }; - let last_index = length(&v) - 1; - assert!(swap_remove(&mut v, last_index) == 99, 0); - assert!(swap_remove(&mut v, 0) == 0, 0); - while (length(&v) > 0) { + let last_index = v.length() - 1; + assert!(v.swap_remove(last_index) == 99, 0); + assert!(v.swap_remove(0) == 0, 0); + while (v.length() > 0) { // the vector is always [N, 1, 2, ... N-1] with repetitive swap_remove(&mut v, 0) - let expected = length(&v); - let val = swap_remove(&mut v, 0); + let expected = v.length(); + let val = v.swap_remove(0); assert!(val == expected, 0); }; - destroy_empty(v); + v.destroy_empty(); } #[test] @@ -594,13 +579,13 @@ module aptos_std::smart_vector { let v2 = singleton(1u64); let v3 = empty(); let v4 = empty(); - append(&mut v3, v4); - assert!(length(&v3) == 0, 0); - append(&mut v2, v3); - assert!(length(&v2) == 1, 0); - append(&mut v1, v2); - assert!(length(&v1) == 1, 0); - destroy(v1); + v3.append(v4); + assert!(v3.length() == 0, 0); + v2.append(v3); + assert!(v2.length() == 1, 0); + v1.append(v2); + assert!(v1.length() == 1, 0); + v1.destroy(); } #[test] @@ -609,21 +594,21 @@ module aptos_std::smart_vector { let v2 = empty(); let i = 0; while (i < 7) { - push_back(&mut v1, i); - i = i + 1; + v1.push_back(i); + i += 1; }; while (i < 25) { - push_back(&mut v2, i); - i = i + 1; + v2.push_back(i); + i += 1; }; - append(&mut v1, v2); - assert!(length(&v1) == 25, 0); + v1.append(v2); + assert!(v1.length() == 25, 0); i = 0; while (i < 25) { - assert!(*borrow(&v1, i) == i, 0); - i = i + 1; + assert!(*v1.borrow(i) == i, 0); + i += 1; }; - destroy(v1); + v1.destroy(); } #[test] @@ -631,36 +616,36 @@ module aptos_std::smart_vector { let v = empty(); let i = 0u64; while (i < 101) { - push_back(&mut v, i); - i = i + 1; - }; - let inline_len = vector::length(&v.inline_vec); - remove(&mut v, 100); - remove(&mut v, 90); - remove(&mut v, 80); - remove(&mut v, 70); - remove(&mut v, 60); - remove(&mut v, 50); - remove(&mut v, 40); - remove(&mut v, 30); - remove(&mut v, 20); - assert!(vector::length(&v.inline_vec) == inline_len, 0); - remove(&mut v, 10); - assert!(vector::length(&v.inline_vec) + 1 == inline_len, 0); - remove(&mut v, 0); - assert!(vector::length(&v.inline_vec) + 2 == inline_len, 0); - assert!(length(&v) == 90, 0); + v.push_back(i); + i += 1; + }; + let inline_len = v.inline_vec.length(); + v.remove(100); + v.remove(90); + v.remove(80); + v.remove(70); + v.remove(60); + v.remove(50); + v.remove(40); + v.remove(30); + v.remove(20); + assert!(v.inline_vec.length() == inline_len, 0); + v.remove(10); + assert!(v.inline_vec.length() + 1 == inline_len, 0); + 
v.remove(0); + assert!(v.inline_vec.length() + 2 == inline_len, 0); + assert!(v.length() == 90, 0); let index = 0; i = 0; while (i < 101) { if (i % 10 != 0) { - assert!(*borrow(&v, index) == i, 0); - index = index + 1; + assert!(*v.borrow(index) == i, 0); + index += 1; }; - i = i + 1; + i += 1; }; - destroy(v); + v.destroy(); } #[test] @@ -668,49 +653,46 @@ module aptos_std::smart_vector { let v = empty(); let i = 0u64; while (i < 10) { - push_back(&mut v, i); - i = i + 1; + v.push_back(i); + i += 1; }; - reverse(&mut v); + v.reverse(); let k = 0; while (k < 10) { - assert!(*vector::borrow(&v.inline_vec, k) == 9 - k, 0); - k = k + 1; + assert!(v.inline_vec[k] == 9 - k, 0); + k += 1; }; while (i < 100) { - push_back(&mut v, i); - i = i + 1; + v.push_back(i); + i += 1; }; - while (!vector::is_empty(&v.inline_vec)) { - remove(&mut v, 0); + while (!v.inline_vec.is_empty()) { + v.remove(0); }; - reverse(&mut v); + v.reverse(); i = 0; - let len = length(&v); + let len = v.length(); while (i + 1 < len) { assert!( - *big_vector::borrow(option::borrow(&v.big_vec), i) == *big_vector::borrow( - option::borrow(&v.big_vec), - i + 1 - ) + 1, + *v.big_vec.borrow().borrow(i) == *v.big_vec.borrow().borrow(i + 1) + 1, 0 ); - i = i + 1; + i += 1; }; - destroy(v); + v.destroy(); } #[test] fun smart_vector_add_all_test() { let v = empty_with_config(1, 2); - add_all(&mut v, vector[1, 2, 3, 4, 5, 6]); - assert!(length(&v) == 6, 0); + v.add_all(vector[1, 2, 3, 4, 5, 6]); + assert!(v.length() == 6, 0); let i = 0; while (i < 6) { - assert!(*borrow(&v, i) == i + 1, 0); - i = i + 1; + assert!(*v.borrow(i) == i + 1, 0); + i += 1; }; - destroy(v); + v.destroy(); } #[test] @@ -718,16 +700,16 @@ module aptos_std::smart_vector { let v1 = empty_with_config(7, 11); let i = 0; while (i < 100) { - push_back(&mut v1, i); - i = i + 1; + v1.push_back(i); + i += 1; }; - let v2 = to_vector(&v1); + let v2 = v1.to_vector(); let j = 0; while (j < 100) { - assert!(*vector::borrow(&v2, j) == j, 0); - j = j + 1; + assert!(v2[j] == j, 0); + j += 1; }; - destroy(v1); + v1.destroy(); } #[test] @@ -735,20 +717,20 @@ module aptos_std::smart_vector { let v = empty(); let i = 0; while (i < 101) { - push_back(&mut v, i); - i = i + 1; + v.push_back(i); + i += 1; }; i = 0; while (i < 51) { - swap(&mut v, i, 100 - i); - i = i + 1; + v.swap(i, 100 - i); + i += 1; }; i = 0; while (i < 101) { - assert!(*borrow(&v, i) == 100 - i, 0); - i = i + 1; + assert!(*v.borrow(i) == 100 - i, 0); + i += 1; }; - destroy(v); + v.destroy(); } #[test] @@ -756,11 +738,11 @@ module aptos_std::smart_vector { let v = empty(); let i = 0; while (i < 100) { - push_back(&mut v, i); - let (found, idx) = index_of(&mut v, &i); + v.push_back(i); + let (found, idx) = v.index_of(&i); assert!(found && idx == i, 0); - i = i + 1; + i += 1; }; - destroy(v); + v.destroy(); } } diff --git a/aptos-move/framework/aptos-stdlib/sources/data_structures/smart_vector.spec.move b/aptos-move/framework/aptos-stdlib/sources/data_structures/smart_vector.spec.move index c1af495eaa9ed..1d3511afd9632 100644 --- a/aptos-move/framework/aptos-stdlib/sources/data_structures/smart_vector.spec.move +++ b/aptos-move/framework/aptos-stdlib/sources/data_structures/smart_vector.spec.move @@ -2,18 +2,19 @@ spec aptos_std::smart_vector { spec SmartVector { // `bucket_size` shouldn't be 0, if specified. 
- invariant option::is_none(bucket_size) - || (option::is_some(bucket_size) && option::borrow(bucket_size) != 0); + invariant bucket_size.is_none() + || (bucket_size.is_some() && bucket_size.borrow() != 0); // vector length should be <= `inline_capacity`, if specified. - invariant option::is_none(inline_capacity) - || (len(inline_vec) <= option::borrow(inline_capacity)); + invariant inline_capacity.is_none() + || (len(inline_vec) <= inline_capacity.borrow()); // both `inline_capacity` and `bucket_size` should either exist or shouldn't exist at all. - invariant (option::is_none(inline_capacity) && option::is_none(bucket_size)) - || (option::is_some(inline_capacity) && option::is_some(bucket_size)); + invariant (inline_capacity.is_none() && bucket_size.is_none()) + || (inline_capacity.is_some() && bucket_size.is_some()); } spec length { - aborts_if option::is_some(v.big_vec) && len(v.inline_vec) + big_vector::length(option::spec_borrow(v.big_vec)) > MAX_U64; + aborts_if self.big_vec.is_some() && len(self.inline_vec) + option::spec_borrow( + self.big_vec).length() > MAX_U64; } spec empty { @@ -25,19 +26,19 @@ spec aptos_std::smart_vector { } spec destroy_empty { - aborts_if !(is_empty(v)); - aborts_if len(v.inline_vec) != 0 - || option::is_some(v.big_vec); + aborts_if !(self.is_empty()); + aborts_if len(self.inline_vec) != 0 + || self.big_vec.is_some(); } spec borrow { - aborts_if i >= length(v); - aborts_if option::is_some(v.big_vec) && ( - (len(v.inline_vec) + big_vector::length(option::borrow(v.big_vec))) > MAX_U64 + aborts_if i >= self.length(); + aborts_if self.big_vec.is_some() && ( + (len(self.inline_vec) + self.big_vec.borrow().length::()) > MAX_U64 ); } - spec push_back(v: &mut SmartVector, val: T) { + spec push_back(self: &mut SmartVector, val: T) { // use aptos_std::big_vector; // use aptos_std::type_info; pragma verify = false; // TODO: set to false because of timeout @@ -65,24 +66,24 @@ spec aptos_std::smart_vector { pragma verify_duration_estimate = 120; // TODO: set because of timeout (property proved) - aborts_if option::is_some(v.big_vec) + aborts_if self.big_vec.is_some() && - (table_with_length::spec_len(option::borrow(v.big_vec).buckets) == 0); - aborts_if is_empty(v); - aborts_if option::is_some(v.big_vec) && ( - (len(v.inline_vec) + big_vector::length(option::borrow(v.big_vec))) > MAX_U64 + (table_with_length::spec_len(self.big_vec.borrow().buckets) == 0); + aborts_if self.is_empty(); + aborts_if self.big_vec.is_some() && ( + (len(self.inline_vec) + self.big_vec.borrow().length::()) > MAX_U64 ); - ensures length(v) == length(old(v)) - 1; + ensures self.length() == old(self).length() - 1; } spec swap_remove { pragma verify = false; // TODO: set because of timeout - aborts_if i >= length(v); - aborts_if option::is_some(v.big_vec) && ( - (len(v.inline_vec) + big_vector::length(option::borrow(v.big_vec))) > MAX_U64 + aborts_if i >= self.length(); + aborts_if self.big_vec.is_some() && ( + (len(self.inline_vec) + self.big_vec.borrow().length::()) > MAX_U64 ); - ensures length(v) == length(old(v)) - 1; + ensures self.length() == old(self).length() - 1; } spec swap { @@ -97,4 +98,8 @@ spec aptos_std::smart_vector { spec remove { pragma verify = false; } + + spec singleton { + pragma verify = false; + } } diff --git a/aptos-move/framework/aptos-stdlib/sources/data_structures/storage_slots_allocator.move b/aptos-move/framework/aptos-stdlib/sources/data_structures/storage_slots_allocator.move new file mode 100644 index 0000000000000..591cf7738bb49 --- /dev/null +++ 
b/aptos-move/framework/aptos-stdlib/sources/data_structures/storage_slots_allocator.move @@ -0,0 +1,232 @@ +/// Abstraction to having "addressable" storage slots (i.e. items) in global storage. +/// Addresses are local u64 values (unique within a single StorageSlotsAllocator instance, +/// but can and do overlap across instances). +/// +/// Allows optionally to initialize slots (and pay for them upfront), and then reuse them, +/// providing predictable storage costs. +/// +/// If we need to mutate multiple slots at the same time, we can workaround borrow_mut preventing us from that, +/// via provided pair of `remove_and_reserve` and `fill_reserved_slot` methods, to do so in non-conflicting manner. +/// +/// Similarly allows getting an address upfront via `reserve_slot`, for a slot created +/// later (i.e. if we need address to initialize the value itself). +/// +/// In the future, more sophisticated strategies can be added, without breaking/modifying callers, +/// for example: +/// * inlining some nodes +/// * having a fee-payer for any storage creation operations +module aptos_std::storage_slots_allocator { + use std::error; + use aptos_std::table_with_length::{Self, TableWithLength}; + use std::option::{Self, Option}; + + const EINVALID_ARGUMENT: u64 = 1; + const ECANNOT_HAVE_SPARES_WITHOUT_REUSE: u64 = 2; + const EINTERNAL_INVARIANT_BROKEN: u64 = 7; + + const NULL_INDEX: u64 = 0; + const FIRST_INDEX: u64 = 10; // keeping space for usecase-specific values + + /// Data stored in an individual slot + enum Link has store { + /// Variant that stores actual data + Occupied { + value: T, + }, + /// Empty variant (that keeps storage item from being deleted) + /// and represents a node in a linked list of empty slots. + Vacant { + next: u64, + } + } + + enum StorageSlotsAllocator has store { + // V1 is sequential - any two operations on the StorageSlotsAllocator will conflict. + // In general, StorageSlotsAllocator is invoked on less frequent operations, so + // that shouldn't be a big issue. + V1 { + slots: Option>>, // Lazily create slots table only when needed + new_slot_index: u64, + should_reuse: bool, + reuse_head_index: u64, + reuse_spare_count: u32, + }, + } + + /// Handle to a reserved slot within a transaction. + /// Not copy/drop/store-able, to guarantee reservation + /// is used or released within the transaction. + struct ReservedSlot { + slot_index: u64, + } + + /// Ownership handle to a slot. + /// Not copy/drop-able to make sure slots are released when not needed, + /// and there is unique owner for each slot. 
+ struct StoredSlot has store { + slot_index: u64, + } + + public fun new(should_reuse: bool): StorageSlotsAllocator { + StorageSlotsAllocator::V1 { + slots: option::none(), + new_slot_index: FIRST_INDEX, + should_reuse, + reuse_head_index: NULL_INDEX, + reuse_spare_count: 0, + } + } + + public fun allocate_spare_slots(self: &mut StorageSlotsAllocator, num_to_allocate: u64) { + assert!(self.should_reuse, error::invalid_argument(ECANNOT_HAVE_SPARES_WITHOUT_REUSE)); + for (i in 0..num_to_allocate) { + let slot_index = self.next_slot_index(); + self.maybe_push_to_reuse_queue(slot_index); + }; + } + + public fun get_num_spare_slot_count(self: &StorageSlotsAllocator): u32 { + assert!(self.should_reuse, error::invalid_argument(ECANNOT_HAVE_SPARES_WITHOUT_REUSE)); + self.reuse_spare_count + } + + public fun add(self: &mut StorageSlotsAllocator, val: T): StoredSlot { + let (stored_slot, reserved_slot) = self.reserve_slot(); + self.fill_reserved_slot(reserved_slot, val); + stored_slot + } + + public fun remove(self: &mut StorageSlotsAllocator, slot: StoredSlot): T { + let (reserved_slot, value) = self.remove_and_reserve(slot.stored_to_index()); + self.free_reserved_slot(reserved_slot, slot); + value + } + + public fun destroy_empty(self: StorageSlotsAllocator) { + loop { + let reuse_index = self.maybe_pop_from_reuse_queue(); + if (reuse_index == NULL_INDEX) { + break; + }; + }; + match (self) { + V1 { + slots, + new_slot_index: _, + should_reuse: _, + reuse_head_index, + reuse_spare_count: _, + } => { + assert!(reuse_head_index == NULL_INDEX, EINTERNAL_INVARIANT_BROKEN); + if (slots.is_some()) { + slots.destroy_some().destroy_empty(); + } else { + slots.destroy_none(); + } + }, + }; + } + + public fun borrow(self: &StorageSlotsAllocator, slot_index: u64): &T { + &self.slots.borrow().borrow(slot_index).value + } + + public fun borrow_mut(self: &mut StorageSlotsAllocator, slot_index: u64): &mut T { + &mut self.slots.borrow_mut().borrow_mut(slot_index).value + } + + // We also provide here operations where `add()` is split into `reserve_slot`, + // and then doing fill_reserved_slot later. + + // Similarly we have `remove_and_reserve`, and then `fill_reserved_slot` later. + + public fun reserve_slot(self: &mut StorageSlotsAllocator): (StoredSlot, ReservedSlot) { + let slot_index = self.maybe_pop_from_reuse_queue(); + if (slot_index == NULL_INDEX) { + slot_index = self.next_slot_index(); + }; + + ( + StoredSlot { slot_index }, + ReservedSlot { slot_index }, + ) + } + + public fun fill_reserved_slot(self: &mut StorageSlotsAllocator, slot: ReservedSlot, val: T) { + let ReservedSlot { slot_index } = slot; + self.add_link(slot_index, Link::Occupied { value: val }); + } + + /// Remove storage slot, but reserve it for later. 
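// Editorial usage sketch of the allocator API above (not part of this change; the `alloc`
// binding and the u64 payload are hypothetical, and the type argument follows the generic
// parameter of `new`):
//
//   let alloc = storage_slots_allocator::new<u64>(/* should_reuse */ true);
//   // Plain add/remove:
//   let stored = alloc.add(42);
//   let val = alloc.remove(stored);        // val == 42; the slot returns to the reuse queue
//   // Two-phase form, when the address is needed before the value exists:
//   let (stored, reserved) = alloc.reserve_slot();
//   let idx = stored.stored_to_index();    // usable while the value is still being built
//   alloc.fill_reserved_slot(reserved, 7);
//   // (`stored` would later be kept in storage or passed to `remove`.)
//   // `remove_and_reserve` below plus `fill_reserved_slot` likewise let callers mutate a
//   // slot without holding a long-lived &mut borrow of the whole allocator.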
+ public fun remove_and_reserve(self: &mut StorageSlotsAllocator, slot_index: u64): (ReservedSlot, T) { + let Link::Occupied { value } = self.remove_link(slot_index); + (ReservedSlot { slot_index }, value) + } + + public fun free_reserved_slot(self: &mut StorageSlotsAllocator, reserved_slot: ReservedSlot, stored_slot: StoredSlot) { + let ReservedSlot { slot_index } = reserved_slot; + assert!(slot_index == stored_slot.slot_index, EINVALID_ARGUMENT); + let StoredSlot { slot_index: _ } = stored_slot; + self.maybe_push_to_reuse_queue(slot_index); + } + + // ========== Section for methods handling references ======== + + public fun reserved_to_index(self: &ReservedSlot): u64 { + self.slot_index + } + + public fun stored_to_index(self: &StoredSlot): u64 { + self.slot_index + } + + public fun is_null_index(slot_index: u64): bool { + slot_index == NULL_INDEX + } + + public fun is_special_unused_index(slot_index: u64): bool { + slot_index != NULL_INDEX && slot_index < FIRST_INDEX + } + + // ========== Section for private internal utility methods ======== + + fun maybe_pop_from_reuse_queue(self: &mut StorageSlotsAllocator): u64 { + let slot_index = self.reuse_head_index; + if (slot_index != NULL_INDEX) { + let Link::Vacant { next } = self.remove_link(slot_index); + self.reuse_head_index = next; + self.reuse_spare_count -= 1; + }; + slot_index + } + + fun maybe_push_to_reuse_queue(self: &mut StorageSlotsAllocator, slot_index: u64) { + if (self.should_reuse) { + let link = Link::Vacant { next: self.reuse_head_index }; + self.add_link(slot_index, link); + self.reuse_head_index = slot_index; + self.reuse_spare_count += 1; + }; + } + + fun next_slot_index(self: &mut StorageSlotsAllocator): u64 { + let slot_index = self.new_slot_index; + self.new_slot_index += 1; + if (self.slots.is_none()) { + self.slots.fill(table_with_length::new>()); + }; + slot_index + } + + fun add_link(self: &mut StorageSlotsAllocator, slot_index: u64, link: Link) { + self.slots.borrow_mut().add(slot_index, link); + } + + fun remove_link(self: &mut StorageSlotsAllocator, slot_index: u64): Link { + self.slots.borrow_mut().remove(slot_index) + } + + spec module { + pragma verify = false; + } +} diff --git a/aptos-move/framework/aptos-stdlib/sources/data_structures/tests/smart_table_test.move b/aptos-move/framework/aptos-stdlib/sources/data_structures/tests/smart_table_test.move index 089b757439e6f..a5354e1fd375a 100644 --- a/aptos-move/framework/aptos-stdlib/sources/data_structures/tests/smart_table_test.move +++ b/aptos-move/framework/aptos-stdlib/sources/data_structures/tests/smart_table_test.move @@ -5,10 +5,8 @@ module aptos_std::smart_table_test { #[test_only] public fun make_smart_table(): SmartTable { let table = smart_table::new_with_config(0, 50, 10); - let i = 0u64; - while (i < 100) { - smart_table::add(&mut table, i, i); - i = i + 1; + for (i in 0..100) { + table.add(i, i); }; table } @@ -17,42 +15,42 @@ module aptos_std::smart_table_test { public fun smart_table_for_each_ref_test() { let t = make_smart_table(); let s = 0; - smart_table::for_each_ref(&t, |x, y| { - s = s + *x + *y; + t.for_each_ref(|x, y| { + s += *x + *y; }); assert!(s == 9900, 0); - smart_table::destroy(t); + t.destroy(); } #[test] public fun smart_table_for_each_mut_test() { let t = make_smart_table(); - smart_table::for_each_mut(&mut t, |_key, val| { + t.for_each_mut(|_key, val| { let val: &mut u64 = val; - *val = *val + 1 + *val += 1 }); - smart_table::for_each_ref(&t, |key, val| { + t.for_each_ref(|key, val| { assert!(*key + 1 == *val, *key); }); - 
smart_table::destroy(t); + t.destroy(); } #[test] public fun smart_table_test_map_ref_test() { let t = make_smart_table(); - let r = smart_table::map_ref(&t, |val| *val + 1); - smart_table::for_each_ref(&r, |key, val| { + let r = t.map_ref(|val| *val + 1); + r.for_each_ref(|key, val| { assert!(*key + 1 == *val, *key); }); - smart_table::destroy(t); - smart_table::destroy(r); + t.destroy(); + r.destroy(); } #[test] public fun smart_table_any_test() { let t = make_smart_table(); - let r = smart_table::any(&t, |_k, v| *v >= 99); + let r = t.any(|_k, v| *v >= 99); assert!(r, 0); - smart_table::destroy(t); + t.destroy(); } } diff --git a/aptos-move/framework/aptos-stdlib/sources/data_structures/tests/smart_vector_test.move b/aptos-move/framework/aptos-stdlib/sources/data_structures/tests/smart_vector_test.move index ed7cb50af35f6..851e28e443e8e 100644 --- a/aptos-move/framework/aptos-stdlib/sources/data_structures/tests/smart_vector_test.move +++ b/aptos-move/framework/aptos-stdlib/sources/data_structures/tests/smart_vector_test.move @@ -8,8 +8,8 @@ module aptos_std::smart_vector_test { let v = V::new(); let i = 1u64; while (i <= k) { - V::push_back(&mut v, i); - i = i + 1; + v.push_back(i); + i += 1; }; v } @@ -18,9 +18,9 @@ module aptos_std::smart_vector_test { fun smart_vector_for_each_test() { let v = make_smart_vector(100); let i = 0; - V::for_each(v, |x| { + v.for_each(|x| { assert!(i + 1 == x, 0); - i = i + 1; + i += 1; }); } @@ -28,9 +28,9 @@ module aptos_std::smart_vector_test { fun smart_vector_for_each_reverse_test() { let v = make_smart_vector(100); let i = 0; - V::for_each_reverse(v, |x| { + v.for_each_reverse(|x| { assert!(i == 100 - x, 0); - i = i + 1; + i += 1; }); } @@ -38,21 +38,21 @@ module aptos_std::smart_vector_test { fun smart_vector_for_each_ref_test() { let v = make_smart_vector(100); let s = 0; - V::for_each_ref(&v, |x| { - s = s + *x; + v.for_each_ref(|x| { + s += *x; }); assert!(s == 5050, 0); - V::destroy(v); + v.destroy(); } #[test] fun smart_vector_for_each_mut_test() { let v = make_smart_vector(100); - V::for_each_mut(&mut v, |x| { + v.for_each_mut(|x| { let x: &mut u64 = x; - *x = *x + 1; + *x += 1; }); - assert!(V::fold(v, 0, |s, x| { + assert!(v.fold(0, |s, x| { s + x }) == 5150, 0); } @@ -60,21 +60,21 @@ module aptos_std::smart_vector_test { #[test] fun smart_vector_enumerate_ref_test() { let v = make_smart_vector(100); - V::enumerate_ref(&v, |i, x| { + v.enumerate_ref(|i, x| { assert!(i + 1 == *x, 0); }); - V::destroy(v); + v.destroy(); } #[test] fun smart_vector_enumerate_mut_test() { let v = make_smart_vector(100); - V::enumerate_mut(&mut v, |i, x| { + v.enumerate_mut(|i, x| { let x: &mut u64 = x; assert!(i + 1 == *x, 0); - *x = *x + 1; + *x += 1; }); - assert!(V::fold(v, 0, |s, x| { + assert!(v.fold(0, |s, x| { s + x }) == 5150, 0); } @@ -83,9 +83,9 @@ module aptos_std::smart_vector_test { fun smart_vector_fold_test() { let v = make_smart_vector(100); let i = 0; - let sum = V::fold(v, 0, |s, x| { + let sum = v.fold(0, |s, x| { assert!(i + 1 == x, 0); - i = i + 1; + i += 1; s + x }); assert!(sum == 5050, 0); @@ -95,9 +95,9 @@ module aptos_std::smart_vector_test { fun smart_vector_for_foldr_test() { let v = make_smart_vector(100); let i = 0; - let sum = V::foldr(v, 0, |x, s| { + let sum = v.foldr(0, |x, s| { assert!(i == 100 - x, i); - i = i + 1; + i += 1; s + x }); assert!(sum == 5050, 0); @@ -106,8 +106,8 @@ module aptos_std::smart_vector_test { #[test] fun smart_vector_map_test() { let v = make_smart_vector(100); - let mapped_v = V::map(v, |x| { x * 2 
}); - let sum = V::fold(mapped_v, 0, |s, x| { + let mapped_v = v.map(|x| { x * 2 }); + let sum = mapped_v.fold(0, |s, x| { s + x }); assert!(sum == 10100, 0); @@ -116,11 +116,11 @@ module aptos_std::smart_vector_test { #[test] fun smart_vector_map_ref_test() { let v = make_smart_vector(100); - let mapped_v = V::map_ref(&v, |x| *x * 2); - assert!(V::fold(v, 0, |s, x| { + let mapped_v = v.map_ref(|x| *x * 2); + assert!(v.fold(0, |s, x| { s + x }) == 5050, 0); - assert!(V::fold(mapped_v, 0, |s, x| { + assert!(mapped_v.fold(0, |s, x| { s + x }) == 10100, 0); } @@ -128,11 +128,11 @@ module aptos_std::smart_vector_test { #[test] fun smart_vector_filter_test() { let v = make_smart_vector(100); - let filtered_v = V::filter(v, |x| *x % 10 == 0); - V::enumerate_ref(&filtered_v, |i, x| { + let filtered_v = v.filter(|x| *x % 10 == 0); + filtered_v.enumerate_ref(|i, x| { assert!((i + 1) * 10 == *x, 0); }); - V::destroy(filtered_v); + filtered_v.destroy(); } #[test] @@ -140,10 +140,10 @@ module aptos_std::smart_vector_test { let v1 = make_smart_vector(100); let v2 = make_smart_vector(100); let s = 0; - V::zip(v1, v2, |e1, e2| { + v1.zip(v2, |e1, e2| { let e1: u64 = e1; let e2: u64 = e2; - s = s + e1 / e2 + s += e1 / e2 }); assert!(s == 100, 0); } @@ -155,10 +155,10 @@ module aptos_std::smart_vector_test { let v1 = make_smart_vector(100); let v2 = make_smart_vector(99); let s = 0; - V::zip(v1, v2, |e1, e2| { + v1.zip(v2, |e1, e2| { let e1: u64 = e1; let e2: u64 = e2; - s = s + e1 / e2 + s += e1 / e2 }); } @@ -167,10 +167,10 @@ module aptos_std::smart_vector_test { let v1 = make_smart_vector(100); let v2 = make_smart_vector(100); let s = 0; - V::zip_ref(&v1, &v2, |e1, e2| s = s + *e1 / *e2); + v1.zip_ref(&v2, |e1, e2| s += *e1 / *e2); assert!(s == 100, 0); - V::destroy(v1); - V::destroy(v2); + v1.destroy(); + v2.destroy(); } #[test] @@ -180,42 +180,42 @@ module aptos_std::smart_vector_test { let v1 = make_smart_vector(100); let v2 = make_smart_vector(99); let s = 0; - V::zip_ref(&v1, &v2, |e1, e2| s = s + *e1 / *e2); - V::destroy(v1); - V::destroy(v2); + v1.zip_ref(&v2, |e1, e2| s += *e1 / *e2); + v1.destroy(); + v2.destroy(); } #[test] fun smart_vector_test_zip_mut() { let v1 = make_smart_vector(100); let v2 = make_smart_vector(100); - V::zip_mut(&mut v1, &mut v2, |e1, e2| { + v1.zip_mut(&mut v2, |e1, e2| { let e1: &mut u64 = e1; let e2: &mut u64 = e2; - *e1 = *e1 + 1; - *e2 = *e2 - 1; + *e1 += 1; + *e2 -= 1; }); - V::zip_ref(&v1, &v2, |e1, e2| assert!(*e1 == *e2 + 2, 0)); - V::destroy(v1); - V::destroy(v2); + v1.zip_ref(&v2, |e1, e2| assert!(*e1 == *e2 + 2, 0)); + v1.destroy(); + v2.destroy(); } #[test] fun smart_vector_test_zip_map() { let v1 = make_smart_vector(100); let v2 = make_smart_vector(100); - let result = V::zip_map(v1, v2, |e1, e2| e1 / e2); - V::for_each(result, |v| assert!(v == 1, 0)); + let result = v1.zip_map(v2, |e1, e2| e1 / e2); + result.for_each(|v| assert!(v == 1, 0)); } #[test] fun smart_vector_test_zip_map_ref() { let v1 = make_smart_vector(100); let v2 = make_smart_vector(100); - let result = V::zip_map_ref(&v1, &v2, |e1, e2| *e1 / *e2); - V::for_each(result, |v| assert!(v == 1, 0)); - V::destroy(v1); - V::destroy(v2); + let result = v1.zip_map_ref(&v2, |e1, e2| *e1 / *e2); + result.for_each(|v| assert!(v == 1, 0)); + v1.destroy(); + v2.destroy(); } #[test] @@ -225,9 +225,9 @@ module aptos_std::smart_vector_test { let v1 = make_smart_vector(100); let v2 = make_smart_vector(99); let s = 0; - V::zip_mut(&mut v1, &mut v2, |e1, e2| s = s + *e1 / *e2); - V::destroy(v1); - 
V::destroy(v2); + v1.zip_mut(&mut v2, |e1, e2| s += *e1 / *e2); + v1.destroy(); + v2.destroy(); } #[test] @@ -236,7 +236,7 @@ module aptos_std::smart_vector_test { fun smart_vector_test_zip_map_mismatching_lengths_should_fail() { let v1 = make_smart_vector(100); let v2 = make_smart_vector(99); - V::destroy(V::zip_map(v1, v2, |e1, e2| e1 / e2)); + v1.zip_map(v2, |e1, e2| e1 / e2).destroy(); } #[test] @@ -245,8 +245,8 @@ module aptos_std::smart_vector_test { fun smart_vector_test_zip_map_ref_mismatching_lengths_should_fail() { let v1 = make_smart_vector(100); let v2 = make_smart_vector(99); - V::destroy(V::zip_map_ref(&v1, &v2, |e1, e2| *e1 / *e2)); - V::destroy(v1); - V::destroy(v2); + v1.zip_map_ref(&v2, |e1, e2| *e1 / *e2).destroy(); + v1.destroy(); + v2.destroy(); } } diff --git a/aptos-move/framework/aptos-stdlib/sources/debug.move b/aptos-move/framework/aptos-stdlib/sources/debug.move index 21b707c7a9982..3f49f4021ae4c 100644 --- a/aptos-move/framework/aptos-stdlib/sources/debug.move +++ b/aptos-move/framework/aptos-stdlib/sources/debug.move @@ -17,9 +17,6 @@ module aptos_std::debug { native fun native_print(x: String); native fun native_stack_trace(): String; - #[test_only] - use std::vector; - #[test_only] struct Foo has drop {} #[test_only] @@ -50,7 +47,7 @@ module aptos_std::debug { #[test_only] fun assert_equal(x: &T, expected: vector) { - if (std::string::bytes(&format(x)) != &expected) { + if (format(x).bytes() != &expected) { print(&format(x)); print(&std::string::utf8(expected)); assert!(false, 1); @@ -59,7 +56,7 @@ module aptos_std::debug { #[test_only] fun assert_string_equal(x: vector, expected: vector) { - assert!(std::string::bytes(&format(&std::string::utf8(x))) == &expected, 1); + assert!(format(&std::string::utf8(x)).bytes() == &expected, 1); } #[test] @@ -67,10 +64,7 @@ module aptos_std::debug { let x = 42; assert_equal(&x, b"42"); - let v = vector::empty(); - vector::push_back(&mut v, 100); - vector::push_back(&mut v, 200); - vector::push_back(&mut v, 300); + let v = vector[100, 200, 300]; assert_equal(&v, b"[ 100, 200, 300 ]"); let foo = Foo {}; diff --git a/aptos-move/framework/aptos-stdlib/sources/fixed_point64.move b/aptos-move/framework/aptos-stdlib/sources/fixed_point64.move index ac864c821495b..e01c72c7485e6 100644 --- a/aptos-move/framework/aptos-stdlib/sources/fixed_point64.move +++ b/aptos-move/framework/aptos-stdlib/sources/fixed_point64.move @@ -29,31 +29,31 @@ module aptos_std::fixed_point64 { /// Abort code on calculation result is negative. const ENEGATIVE_RESULT: u64 = 0x10006; - /// Returns x - y. x must be not less than y. - public fun sub(x: FixedPoint64, y: FixedPoint64): FixedPoint64 { - let x_raw = get_raw_value(x); - let y_raw = get_raw_value(y); + /// Returns self - y. self must be not less than y. + public fun sub(self: FixedPoint64, y: FixedPoint64): FixedPoint64 { + let x_raw = self.get_raw_value(); + let y_raw = y.get_raw_value(); assert!(x_raw >= y_raw, ENEGATIVE_RESULT); create_from_raw_value(x_raw - y_raw) } spec sub { pragma opaque; - aborts_if x.value < y.value with ENEGATIVE_RESULT; - ensures result.value == x.value - y.value; + aborts_if self.value < y.value with ENEGATIVE_RESULT; + ensures result.value == self.value - y.value; } - /// Returns x + y. The result cannot be greater than MAX_U128. - public fun add(x: FixedPoint64, y: FixedPoint64): FixedPoint64 { - let x_raw = get_raw_value(x); - let y_raw = get_raw_value(y); + /// Returns self + y. The result cannot be greater than MAX_U128. 
+ public fun add(self: FixedPoint64, y: FixedPoint64): FixedPoint64 { + let x_raw = self.get_raw_value(); + let y_raw = y.get_raw_value(); let result = (x_raw as u256) + (y_raw as u256); assert!(result <= MAX_U128, ERATIO_OUT_OF_RANGE); create_from_raw_value((result as u128)) } spec add { pragma opaque; - aborts_if (x.value as u256) + (y.value as u256) > MAX_U128 with ERATIO_OUT_OF_RANGE; - ensures result.value == x.value + y.value; + aborts_if (self.value as u256) + (y.value as u256) > MAX_U128 with ERATIO_OUT_OF_RANGE; + ensures result.value == self.value + y.value; } /// Multiply a u128 integer by a fixed-point number, truncating any @@ -172,13 +172,13 @@ module aptos_std::fixed_point64 { /// Accessor for the raw u128 value. Other less common operations, such as /// adding or subtracting FixedPoint64 values, can be done using the raw /// values directly. - public fun get_raw_value(num: FixedPoint64): u128 { - num.value + public fun get_raw_value(self: FixedPoint64): u128 { + self.value } /// Returns true if the ratio is zero. - public fun is_zero(num: FixedPoint64): bool { - num.value == 0 + public fun is_zero(self: FixedPoint64): bool { + self.value == 0 } /// Returns the smaller of the two FixedPoint64 numbers. @@ -223,89 +223,89 @@ module aptos_std::fixed_point64 { } } - /// Returns true if num1 <= num2 - public fun less_or_equal(num1: FixedPoint64, num2: FixedPoint64): bool { - num1.value <= num2.value + /// Returns true if self <= num2 + public fun less_or_equal(self: FixedPoint64, num2: FixedPoint64): bool { + self.value <= num2.value } spec less_or_equal { pragma opaque; aborts_if false; - ensures result == spec_less_or_equal(num1, num2); + ensures result == spec_less_or_equal(self, num2); } - spec fun spec_less_or_equal(num1: FixedPoint64, num2: FixedPoint64): bool { - num1.value <= num2.value + spec fun spec_less_or_equal(self: FixedPoint64, num2: FixedPoint64): bool { + self.value <= num2.value } - /// Returns true if num1 < num2 - public fun less(num1: FixedPoint64, num2: FixedPoint64): bool { - num1.value < num2.value + /// Returns true if self < num2 + public fun less(self: FixedPoint64, num2: FixedPoint64): bool { + self.value < num2.value } spec less { pragma opaque; aborts_if false; - ensures result == spec_less(num1, num2); + ensures result == spec_less(self, num2); } - spec fun spec_less(num1: FixedPoint64, num2: FixedPoint64): bool { - num1.value < num2.value + spec fun spec_less(self: FixedPoint64, num2: FixedPoint64): bool { + self.value < num2.value } - /// Returns true if num1 >= num2 - public fun greater_or_equal(num1: FixedPoint64, num2: FixedPoint64): bool { - num1.value >= num2.value + /// Returns true if self >= num2 + public fun greater_or_equal(self: FixedPoint64, num2: FixedPoint64): bool { + self.value >= num2.value } spec greater_or_equal { pragma opaque; aborts_if false; - ensures result == spec_greater_or_equal(num1, num2); + ensures result == spec_greater_or_equal(self, num2); } - spec fun spec_greater_or_equal(num1: FixedPoint64, num2: FixedPoint64): bool { - num1.value >= num2.value + spec fun spec_greater_or_equal(self: FixedPoint64, num2: FixedPoint64): bool { + self.value >= num2.value } - /// Returns true if num1 > num2 - public fun greater(num1: FixedPoint64, num2: FixedPoint64): bool { - num1.value > num2.value + /// Returns true if self > num2 + public fun greater(self: FixedPoint64, num2: FixedPoint64): bool { + self.value > num2.value } spec greater { pragma opaque; aborts_if false; - ensures result == spec_greater(num1, num2); + ensures 
result == spec_greater(self, num2); } - spec fun spec_greater(num1: FixedPoint64, num2: FixedPoint64): bool { - num1.value > num2.value + spec fun spec_greater(self: FixedPoint64, num2: FixedPoint64): bool { + self.value > num2.value } - /// Returns true if num1 = num2 - public fun equal(num1: FixedPoint64, num2: FixedPoint64): bool { - num1.value == num2.value + /// Returns true if self = num2 + public fun equal(self: FixedPoint64, num2: FixedPoint64): bool { + self.value == num2.value } spec equal { pragma opaque; aborts_if false; - ensures result == spec_equal(num1, num2); + ensures result == spec_equal(self, num2); } - spec fun spec_equal(num1: FixedPoint64, num2: FixedPoint64): bool { - num1.value == num2.value + spec fun spec_equal(self: FixedPoint64, num2: FixedPoint64): bool { + self.value == num2.value } - /// Returns true if num1 almost equals to num2, which means abs(num1-num2) <= precision - public fun almost_equal(num1: FixedPoint64, num2: FixedPoint64, precision: FixedPoint64): bool { - if (num1.value > num2.value) { - (num1.value - num2.value <= precision.value) + /// Returns true if self almost equals to num2, which means abs(num1-num2) <= precision + public fun almost_equal(self: FixedPoint64, num2: FixedPoint64, precision: FixedPoint64): bool { + if (self.value > num2.value) { + (self.value - num2.value <= precision.value) } else { - (num2.value - num1.value <= precision.value) + (num2.value - self.value <= precision.value) } } spec almost_equal { pragma opaque; aborts_if false; - ensures result == spec_almost_equal(num1, num2, precision); + ensures result == spec_almost_equal(self, num2, precision); } - spec fun spec_almost_equal(num1: FixedPoint64, num2: FixedPoint64, precision: FixedPoint64): bool { - if (num1.value > num2.value) { - (num1.value - num2.value <= precision.value) + spec fun spec_almost_equal(self: FixedPoint64, num2: FixedPoint64, precision: FixedPoint64): bool { + if (self.value > num2.value) { + (self.value - num2.value <= precision.value) } else { - (num2.value - num1.value <= precision.value) + (num2.value - self.value <= precision.value) } } /// Create a fixedpoint value from a u128 value. @@ -329,27 +329,27 @@ module aptos_std::fixed_point64 { } /// Returns the largest integer less than or equal to a given number. - public fun floor(num: FixedPoint64): u128 { - num.value >> 64 + public fun floor(self: FixedPoint64): u128 { + self.value >> 64 } spec floor { pragma opaque; aborts_if false; - ensures result == spec_floor(num); + ensures result == spec_floor(self); } - spec fun spec_floor(val: FixedPoint64): u128 { - let fractional = val.value % (1 << 64); + spec fun spec_floor(self: FixedPoint64): u128 { + let fractional = self.value % (1 << 64); if (fractional == 0) { - val.value >> 64 + self.value >> 64 } else { - (val.value - fractional) >> 64 + (self.value - fractional) >> 64 } } /// Rounds up the given FixedPoint64 to the next largest integer. 
- public fun ceil(num: FixedPoint64): u128 { - let floored_num = floor(num) << 64; - if (num.value == floored_num) { + public fun ceil(self: FixedPoint64): u128 { + let floored_num = self.floor() << 64; + if (self.value == floored_num) { return floored_num >> 64 }; let val = ((floored_num as u256) + (1 << 64)); @@ -360,41 +360,41 @@ module aptos_std::fixed_point64 { pragma verify_duration_estimate = 1000; pragma opaque; aborts_if false; - ensures result == spec_ceil(num); + ensures result == spec_ceil(self); } - spec fun spec_ceil(val: FixedPoint64): u128 { - let fractional = val.value % (1 << 64); + spec fun spec_ceil(self: FixedPoint64): u128 { + let fractional = self.value % (1 << 64); let one = 1 << 64; if (fractional == 0) { - val.value >> 64 + self.value >> 64 } else { - (val.value - fractional + one) >> 64 + (self.value - fractional + one) >> 64 } } /// Returns the value of a FixedPoint64 to the nearest integer. - public fun round(num: FixedPoint64): u128 { - let floored_num = floor(num) << 64; + public fun round(self: FixedPoint64): u128 { + let floored_num = self.floor() << 64; let boundary = floored_num + ((1 << 64) / 2); - if (num.value < boundary) { + if (self.value < boundary) { floored_num >> 64 } else { - ceil(num) + self.ceil() } } spec round { pragma opaque; aborts_if false; - ensures result == spec_round(num); + ensures result == spec_round(self); } - spec fun spec_round(val: FixedPoint64): u128 { - let fractional = val.value % (1 << 64); + spec fun spec_round(self: FixedPoint64): u128 { + let fractional = self.value % (1 << 64); let boundary = (1 << 64) / 2; let one = 1 << 64; if (fractional < boundary) { - (val.value - fractional) >> 64 + (self.value - fractional) >> 64 } else { - (val.value - fractional + one) >> 64 + (self.value - fractional + one) >> 64 } } @@ -410,10 +410,10 @@ module aptos_std::fixed_point64 { public entry fun test_sub() { let x = create_from_rational(9, 7); let y = create_from_rational(1, 3); - let result = sub(x, y); + let result = x.sub(y); // 9/7 - 1/3 = 20/21 let expected_result = create_from_rational(20, 21); - assert_approx_the_same((get_raw_value(result) as u256), (get_raw_value(expected_result) as u256), 16); + assert_approx_the_same((result.get_raw_value() as u256), (expected_result.get_raw_value() as u256), 16); } #[test] @@ -421,13 +421,13 @@ module aptos_std::fixed_point64 { public entry fun test_sub_should_abort() { let x = create_from_rational(1, 3); let y = create_from_rational(9, 7); - let _ = sub(x, y); + let _ = x.sub(y); } #[test_only] /// For functions that approximate a value it's useful to test a value is close /// to the most correct value up to last digit - fun assert_approx_the_same(x: u256, y: u256, precission: u128) { + fun assert_approx_the_same(x: u256, y: u256, precision: u128) { if (x < y) { let tmp = x; x = y; @@ -435,12 +435,12 @@ module aptos_std::fixed_point64 { }; let mult = 1u256; let n = 10u256; - while (precission > 0) { - if (precission % 2 == 1) { - mult = mult * n; + while (precision > 0) { + if (precision % 2 == 1) { + mult *= n; }; - precission = precission / 2; - n = n * n; + precision /= 2; + n *= n; }; assert!((x - y) * mult < x, 0); } diff --git a/aptos-move/framework/aptos-stdlib/sources/from_bcs.move b/aptos-move/framework/aptos-stdlib/sources/from_bcs.move index 1d7c3c542ee60..b21fcc7799d4c 100644 --- a/aptos-move/framework/aptos-stdlib/sources/from_bcs.move +++ b/aptos-move/framework/aptos-stdlib/sources/from_bcs.move @@ -1,6 +1,6 @@ /// This module provides a number of functions to convert 
_primitive_ types from their representation in `std::bcs` /// to values. This is the opposite of `bcs::to_bytes`. Note that it is not safe to define a generic public `from_bytes` -/// function because this can violate implicit struct invariants, therefore only primitive types are offerred. If +/// function because this can violate implicit struct invariants, therefore only primitive types are offered. If /// a general conversion back-and-force is needed, consider the `aptos_std::Any` type which preserves invariants. /// /// Example: @@ -55,7 +55,7 @@ module aptos_std::from_bcs { public fun to_string(v: vector): String { // To make this safe, we need to evaluate the utf8 invariant. let s = from_bytes(v); - assert!(string::internal_check_utf8(string::bytes(&s)), EINVALID_UTF8); + assert!(string::internal_check_utf8(s.bytes()), EINVALID_UTF8); s } @@ -64,11 +64,12 @@ module aptos_std::from_bcs { /// Note that this function does not put any constraint on `T`. If code uses this function to /// deserialize a linear value, its their responsibility that the data they deserialize is /// owned. - public(friend) native fun from_bytes(bytes: vector): T; + /// + /// Function would abort if T has signer in it. + native friend fun from_bytes(bytes: vector): T; friend aptos_std::any; friend aptos_std::copyable_any; - #[test_only] use std::bcs; @@ -88,4 +89,10 @@ module aptos_std::from_bcs { let bad_vec = b"01"; to_address(bad_vec); } + + #[test(s1 = @0x123)] + #[expected_failure(abort_code = 0x10001, location = Self)] + fun test_signer_roundtrip(s1: signer) { + from_bytes(bcs::to_bytes(&s1)); + } } diff --git a/aptos-move/framework/aptos-stdlib/sources/hash.move b/aptos-move/framework/aptos-stdlib/sources/hash.move index 532fa736e3342..f2f0e5546245a 100644 --- a/aptos-move/framework/aptos-stdlib/sources/hash.move +++ b/aptos-move/framework/aptos-stdlib/sources/hash.move @@ -110,15 +110,12 @@ module aptos_std::aptos_hash { x"c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470", ]; - let i = 0; - while (i < std::vector::length(&inputs)) { - let input = *std::vector::borrow(&inputs, i); - let hash_expected = *std::vector::borrow(&outputs, i); + for (i in 0..inputs.length()) { + let input = inputs[i]; + let hash_expected = outputs[i]; let hash = keccak256(input); assert!(hash_expected == hash, 1); - - i = i + 1; }; } @@ -138,15 +135,12 @@ module aptos_std::aptos_hash { x"cf83e1357eefb8bdf1542850d66d8007d620e4050b5715dc83f4a921d36ce9ce47d0d13c5d85f2b0ff8318d2877eec2f63b931bd47417a81a538327af927da3e", ]; - let i = 0; - while (i < std::vector::length(&inputs)) { - let input = *std::vector::borrow(&inputs, i); - let hash_expected = *std::vector::borrow(&outputs, i); + for (i in 0..inputs.length()) { + let input = inputs[i]; + let hash_expected = outputs[i]; let hash = sha2_512(input); assert!(hash_expected == hash, 1); - - i = i + 1; }; } @@ -165,15 +159,12 @@ module aptos_std::aptos_hash { x"a69f73cca23a9ac5c8b567dc185a756e97c982164fe25859e0d1dcc1475c80a615b2123af1f5f94c11e3e9402c3ac558f500199d95b6d3e301758586281dcd26", ]; - let i = 0; - while (i < std::vector::length(&inputs)) { - let input = *std::vector::borrow(&inputs, i); - let hash_expected = *std::vector::borrow(&outputs, i); + for (i in 0..inputs.length()) { + let input = inputs[i]; + let hash_expected = outputs[i]; let hash = sha3_512(input); assert!(hash_expected == hash, 1); - - i = i + 1; }; } @@ -192,15 +183,12 @@ module aptos_std::aptos_hash { x"9c1185a5c5e9fc54612808977ee8f548b2258d31", ]; - let i = 0; - while (i < 
std::vector::length(&inputs)) { - let input = *std::vector::borrow(&inputs, i); - let hash_expected = *std::vector::borrow(&outputs, i); + for (i in 0..inputs.length()) { + let input = inputs[i]; + let hash_expected = outputs[i]; let hash = ripemd160(input); assert!(hash_expected == hash, 1); - - i = i + 1; }; } @@ -239,15 +227,12 @@ module aptos_std::aptos_hash { x"1deab5a4eb7481453ca9b29e1f7c4be8ba44de4faeeafdf173b310cbaecfc84c", ]; - let i = 0; - while (i < std::vector::length(&inputs)) { - let input = *std::vector::borrow(&inputs, i); - let hash_expected = *std::vector::borrow(&outputs, i); + for (i in 0..inputs.length()) { + let input = inputs[i]; + let hash_expected = outputs[i]; let hash = blake2b_256(input); assert!(hash_expected == hash, 1); - - i = i + 1; }; } } diff --git a/aptos-move/framework/aptos-stdlib/sources/math128.move b/aptos-move/framework/aptos-stdlib/sources/math128.move index 6528153699fb4..7c93cdca7c6c5 100644 --- a/aptos-move/framework/aptos-stdlib/sources/math128.move +++ b/aptos-move/framework/aptos-stdlib/sources/math128.move @@ -39,6 +39,15 @@ module aptos_std::math128 { large } + /// Return least common multiple of `a` & `b` + public inline fun lcm(a: u128, b: u128): u128 { + if (a == 0 || b == 0) { + 0 + } else { + a / gcd(a, b) * b + } + } + /// Returns a * b / c going through u256 to prevent intermediate overflow public inline fun mul_div(a: u128, b: u128, c: u128): u128 { // Inline functions cannot take constants, as then every module using it needs the constant @@ -59,10 +68,10 @@ module aptos_std::math128 { let p = 1; while (e > 1) { if (e % 2 == 1) { - p = p * n; + p *= n; }; - e = e / 2; - n = n * n; + e /= 2; + n *= n; }; p * n } @@ -76,10 +85,10 @@ module aptos_std::math128 { let n = 64; while (n > 0) { if (x >= (1 << n)) { - x = x >> n; - res = res + n; + x >>= n; + res += n; }; - n = n >> 1; + n >>= 1; }; res } @@ -89,9 +98,9 @@ module aptos_std::math128 { let integer_part = floor_log2(x); // Normalize x to [1, 2) in fixed point 32. if (x >= 1 << 32) { - x = x >> (integer_part - 32); + x >>= (integer_part - 32); } else { - x = x << (32 - integer_part); + x <<= (32 - integer_part); }; let frac = 0; let delta = 1 << 31; @@ -101,8 +110,8 @@ module aptos_std::math128 { x = (x * x) >> 32; // x is now in [1, 4) // if x in [2, 4) then log x = 1 + log (x / 2) - if (x >= (2 << 32)) { frac = frac + delta; x = x >> 1; }; - delta = delta >> 1; + if (x >= (2 << 32)) { frac += delta; x >>= 1; }; + delta >>= 1; }; fixed_point32::create_from_raw_value (((integer_part as u64) << 32) + frac) } @@ -112,9 +121,9 @@ module aptos_std::math128 { let integer_part = floor_log2(x); // Normalize x to [1, 2) in fixed point 63. 
To ensure x is smaller then 1<<64 if (x >= 1 << 63) { - x = x >> (integer_part - 63); + x >>= (integer_part - 63); } else { - x = x << (63 - integer_part); + x <<= (63 - integer_part); }; let frac = 0; let delta = 1 << 63; @@ -124,8 +133,8 @@ module aptos_std::math128 { x = (x * x) >> 63; // x is now in [1, 4) // if x in [2, 4) then log x = 1 + log (x / 2) - if (x >= (2 << 63)) { frac = frac + delta; x = x >> 1; }; - delta = delta >> 1; + if (x >= (2 << 63)) { frac += delta; x >>= 1; }; + delta >>= 1; }; fixed_point64::create_from_raw_value (((integer_part as u128) << 64) + frac) } @@ -193,6 +202,28 @@ module aptos_std::math128 { assert!(gcd(462, 1071) == 21, 0); } + #[test] + fun test_lcm() { + assert!(lcm(0, 0) == 0, 0); + assert!(lcm(0, 1) == 0, 0); + assert!(lcm(1, 0) == 0, 0); + assert!(lcm(1, 1) == 1, 0); + assert!(lcm(1024, 144) == 9216, 0); + assert!(lcm(2, 17) == 34, 0); + assert!(lcm(17, 2) == 34, 0); + assert!(lcm(24, 54) == 216, 0); + assert!(lcm(115, 9) == 1035, 0); + assert!(lcm(101, 14) == 1414, 0); + assert!(lcm(110, 5) == 110, 0); + assert!(lcm(100, 8) == 200, 0); + assert!(lcm(32, 6) == 96, 0); + assert!(lcm(110, 13) == 1430, 0); + assert!(lcm(117, 13) == 117, 0); + assert!(lcm(100, 125) == 500, 0); + assert!(lcm(101, 3) == 303, 0); + assert!(lcm(115, 16) == 1840, 0); + } + #[test] public entry fun test_max() { let result = max(3u128, 6u128); @@ -253,12 +284,12 @@ module aptos_std::math128 { let idx: u8 = 0; while (idx < 128) { assert!(floor_log2(1<> 32; let expected = expected - ((taylor1 + taylor2 / 2 + taylor3 / 3) << 32) / 2977044472; // verify it matches to 8 significant digits - assert_approx_the_same((fixed_point32::get_raw_value(res) as u128), expected, 8); - idx = idx + 1; + assert_approx_the_same((res.get_raw_value() as u128), expected, 8); + idx += 1; }; } @@ -291,8 +322,8 @@ module aptos_std::math128 { let idx: u8 = 0; while (idx < 128) { let res = log2_64(1<> 64; let expected = expected - ((taylor1 + taylor2 / 2 + taylor3 / 3 + taylor4 / 4) << 64) / 12786308645202655660; // verify it matches to 8 significant digits - assert_approx_the_same(fixed_point64::get_raw_value(res), (expected as u128), 14); - idx = idx + 1; + assert_approx_the_same(res.get_raw_value(), (expected as u128), 14); + idx += 1; }; } diff --git a/aptos-move/framework/aptos-stdlib/sources/math64.move b/aptos-move/framework/aptos-stdlib/sources/math64.move index 50fd38ed3f6ab..9b89acbe80934 100644 --- a/aptos-move/framework/aptos-stdlib/sources/math64.move +++ b/aptos-move/framework/aptos-stdlib/sources/math64.move @@ -37,6 +37,15 @@ module aptos_std::math64 { large } + /// Returns least common multiple of `a` & `b`. 
+ public inline fun lcm(a: u64, b: u64): u64 { + if (a == 0 || b == 0) { + 0 + } else { + a / gcd(a, b) * b + } + } + /// Returns a * b / c going through u128 to prevent intermediate overflow public inline fun mul_div(a: u64, b: u64, c: u64): u64 { // Inline functions cannot take constants, as then every module using it needs the constant @@ -57,10 +66,10 @@ module aptos_std::math64 { let p = 1; while (e > 1) { if (e % 2 == 1) { - p = p * n; + p *= n; }; - e = e / 2; - n = n * n; + e /= 2; + n *= n; }; p * n } @@ -74,10 +83,10 @@ module aptos_std::math64 { let n = 32; while (n > 0) { if (x >= (1 << n)) { - x = x >> n; - res = res + n; + x >>= n; + res += n; }; - n = n >> 1; + n >>= 1; }; res } @@ -99,8 +108,8 @@ module aptos_std::math64 { y = (y * y) >> 32; // x is now in [1, 4) // if x in [2, 4) then log x = 1 + log (x / 2) - if (y >= (2 << 32)) { frac = frac + delta; y = y >> 1; }; - delta = delta >> 1; + if (y >= (2 << 32)) { frac += delta; y >>= 1; }; + delta >>= 1; }; fixed_point32::create_from_raw_value (((integer_part as u64) << 32) + frac) } @@ -167,6 +176,28 @@ module aptos_std::math64 { assert!(gcd(462, 1071) == 21, 0); } + #[test] + fun test_lcm() { + assert!(lcm(0, 0) == 0, 0); + assert!(lcm(0, 1) == 0, 0); + assert!(lcm(1, 0) == 0, 0); + assert!(lcm(1, 1) == 1, 0); + assert!(lcm(1024, 144) == 9216, 0); + assert!(lcm(2, 17) == 34, 0); + assert!(lcm(17, 2) == 34, 0); + assert!(lcm(24, 54) == 216, 0); + assert!(lcm(115, 9) == 1035, 0); + assert!(lcm(101, 14) == 1414, 0); + assert!(lcm(110, 5) == 110, 0); + assert!(lcm(100, 8) == 200, 0); + assert!(lcm(32, 6) == 96, 0); + assert!(lcm(110, 13) == 1430, 0); + assert!(lcm(117, 13) == 117, 0); + assert!(lcm(100, 125) == 500, 0); + assert!(lcm(101, 3) == 303, 0); + assert!(lcm(115, 16) == 1840, 0); + } + #[test] public entry fun test_max_64() { let result = max(3u64, 6u64); @@ -233,12 +264,12 @@ module aptos_std::math64 { let idx: u8 = 0; while (idx < 64) { assert!(floor_log2(1<> 32; let expected = expected - ((taylor1 + taylor2 / 2 + taylor3 / 3) << 32) / 2977044472; // verify it matches to 8 significant digits - assert_approx_the_same((fixed_point32::get_raw_value(res) as u128), expected, 8); - idx = idx + 1; + assert_approx_the_same((res.get_raw_value() as u128), expected, 8); + idx += 1; }; } diff --git a/aptos-move/framework/aptos-stdlib/sources/math_fixed.move b/aptos-move/framework/aptos-stdlib/sources/math_fixed.move index a2a854d0c0c61..08c3bf1f2ca39 100644 --- a/aptos-move/framework/aptos-stdlib/sources/math_fixed.move +++ b/aptos-move/framework/aptos-stdlib/sources/math_fixed.move @@ -14,40 +14,40 @@ module aptos_std::math_fixed { /// Square root of fixed point number public fun sqrt(x: FixedPoint32): FixedPoint32 { - let y = (fixed_point32::get_raw_value(x) as u128); + let y = (x.get_raw_value() as u128); fixed_point32::create_from_raw_value((math128::sqrt(y << 32) as u64)) } /// Exponent function with a precission of 9 digits. public fun exp(x: FixedPoint32): FixedPoint32 { - let raw_value = (fixed_point32::get_raw_value(x) as u128); + let raw_value = (x.get_raw_value() as u128); fixed_point32::create_from_raw_value((exp_raw(raw_value) as u64)) } /// Because log2 is negative for values < 1 we instead return log2(x) + 32 which /// is positive for all values of x. 
public fun log2_plus_32(x: FixedPoint32): FixedPoint32 { - let raw_value = (fixed_point32::get_raw_value(x) as u128); + let raw_value = (x.get_raw_value() as u128); math128::log2(raw_value) } public fun ln_plus_32ln2(x: FixedPoint32): FixedPoint32 { - let raw_value = (fixed_point32::get_raw_value(x) as u128); - let x = (fixed_point32::get_raw_value(math128::log2(raw_value)) as u128); + let raw_value = (x.get_raw_value() as u128); + let x = (math128::log2(raw_value).get_raw_value() as u128); fixed_point32::create_from_raw_value((x * LN2 >> 32 as u64)) } /// Integer power of a fixed point number public fun pow(x: FixedPoint32, n: u64): FixedPoint32 { - let raw_value = (fixed_point32::get_raw_value(x) as u128); + let raw_value = (x.get_raw_value() as u128); fixed_point32::create_from_raw_value((pow_raw(raw_value, (n as u128)) as u64)) } /// Specialized function for x * y / z that omits intermediate shifting public fun mul_div(x: FixedPoint32, y: FixedPoint32, z: FixedPoint32): FixedPoint32 { - let a = fixed_point32::get_raw_value(x); - let b = fixed_point32::get_raw_value(y); - let c = fixed_point32::get_raw_value(z); + let a = x.get_raw_value(); + let b = y.get_raw_value(); + let c = z.get_raw_value(); fixed_point32::create_from_raw_value (math64::mul_div(a, b, c)) } @@ -68,7 +68,7 @@ module aptos_std::math_fixed { // This has an error of 5000 / 4 10^9 roughly 6 digits of precission let power = pow_raw(roottwo, exponent); let eps_correction = 1241009291; - power = power + ((power * eps_correction * exponent) >> 64); + power += ((power * eps_correction * exponent) >> 64); // x is fixed point number smaller than 595528/2^32 < 0.00014 so we need only 2 tayler steps // to get the 6 digits of precission let taylor1 = (power * x) >> (32 - shift); @@ -80,12 +80,12 @@ module aptos_std::math_fixed { // Calculate x to the power of n, where x and the result are fixed point numbers. fun pow_raw(x: u128, n: u128): u128 { let res: u256 = 1 << 64; - x = x << 32; + x <<= 32; while (n != 0) { if (n & 1 != 0) { res = (res * (x as u256)) >> 64; }; - n = n >> 1; + n >>= 1; x = ((((x as u256) * (x as u256)) >> 64) as u128); }; ((res >> 32) as u128) @@ -96,10 +96,10 @@ module aptos_std::math_fixed { // Sqrt is based on math128::sqrt and thus most of the testing is done there. let fixed_base = 1 << 32; let result = sqrt(fixed_point32::create_from_u64(1)); - assert!(fixed_point32::get_raw_value(result) == fixed_base, 0); + assert!(result.get_raw_value() == fixed_base, 0); let result = sqrt(fixed_point32::create_from_u64(2)); - assert_approx_the_same((fixed_point32::get_raw_value(result) as u128), 6074001000, 9); + assert_approx_the_same((result.get_raw_value() as u128), 6074001000, 9); } #[test] diff --git a/aptos-move/framework/aptos-stdlib/sources/math_fixed64.move b/aptos-move/framework/aptos-stdlib/sources/math_fixed64.move index 2369b6afebc3e..8c9f41234cc65 100644 --- a/aptos-move/framework/aptos-stdlib/sources/math_fixed64.move +++ b/aptos-move/framework/aptos-stdlib/sources/math_fixed64.move @@ -13,7 +13,7 @@ module aptos_std::math_fixed64 { /// Square root of fixed point number public fun sqrt(x: FixedPoint64): FixedPoint64 { - let y = fixed_point64::get_raw_value(x); + let y = x.get_raw_value(); let z = (math128::sqrt(y) << 32 as u256); z = (z + ((y as u256) << 64) / z) >> 1; fixed_point64::create_from_raw_value((z as u128)) @@ -21,34 +21,34 @@ module aptos_std::math_fixed64 { /// Exponent function with a precission of 9 digits. 
public fun exp(x: FixedPoint64): FixedPoint64 { - let raw_value = (fixed_point64::get_raw_value(x) as u256); + let raw_value = (x.get_raw_value() as u256); fixed_point64::create_from_raw_value((exp_raw(raw_value) as u128)) } /// Because log2 is negative for values < 1 we instead return log2(x) + 64 which /// is positive for all values of x. public fun log2_plus_64(x: FixedPoint64): FixedPoint64 { - let raw_value = (fixed_point64::get_raw_value(x) as u128); + let raw_value = (x.get_raw_value()); math128::log2_64(raw_value) } public fun ln_plus_32ln2(x: FixedPoint64): FixedPoint64 { - let raw_value = fixed_point64::get_raw_value(x); - let x = (fixed_point64::get_raw_value(math128::log2_64(raw_value)) as u256); + let raw_value = x.get_raw_value(); + let x = (math128::log2_64(raw_value).get_raw_value() as u256); fixed_point64::create_from_raw_value(((x * LN2) >> 64 as u128)) } /// Integer power of a fixed point number public fun pow(x: FixedPoint64, n: u64): FixedPoint64 { - let raw_value = (fixed_point64::get_raw_value(x) as u256); + let raw_value = (x.get_raw_value() as u256); fixed_point64::create_from_raw_value((pow_raw(raw_value, (n as u128)) as u128)) } /// Specialized function for x * y / z that omits intermediate shifting public fun mul_div(x: FixedPoint64, y: FixedPoint64, z: FixedPoint64): FixedPoint64 { - let a = fixed_point64::get_raw_value(x); - let b = fixed_point64::get_raw_value(y); - let c = fixed_point64::get_raw_value(z); + let a = x.get_raw_value(); + let b = y.get_raw_value(); + let c = z.get_raw_value(); fixed_point64::create_from_raw_value (math128::mul_div(a, b, c)) } @@ -69,7 +69,7 @@ module aptos_std::math_fixed64 { // 2^(1/580) = roottwo(1 - eps), so the number we seek is roottwo^exponent (1 - eps * exponent) let power = pow_raw(roottwo, (exponent as u128)); let eps_correction = 219071715585908898; - power = power - ((power * eps_correction * exponent) >> 128); + power -= ((power * eps_correction * exponent) >> 128); // x is fixed point number smaller than bigfactor/2^64 < 0.0011 so we need only 5 tayler steps // to get the 15 digits of precission let taylor1 = (power * x) >> (64 - shift); @@ -88,7 +88,7 @@ module aptos_std::math_fixed64 { if (n & 1 != 0) { res = (res * x) >> 64; }; - n = n >> 1; + n >>= 1; x = (x * x) >> 64; }; res @@ -99,10 +99,10 @@ module aptos_std::math_fixed64 { // Sqrt is based on math128::sqrt and thus most of the testing is done there. let fixed_base = 1 << 64; let result = sqrt(fixed_point64::create_from_u128(1)); - assert!(fixed_point64::get_raw_value(result) == fixed_base, 0); + assert!(result.get_raw_value() == fixed_base, 0); let result = sqrt(fixed_point64::create_from_u128(2)); - assert_approx_the_same((fixed_point64::get_raw_value(result) as u256), 26087635650665564424, 16); + assert_approx_the_same((result.get_raw_value() as u256), 26087635650665564424, 16); } #[test] diff --git a/aptos-move/framework/aptos-stdlib/sources/pool_u64.move b/aptos-move/framework/aptos-stdlib/sources/pool_u64.move index f1aaea9fd947f..03d1832fdb6b7 100644 --- a/aptos-move/framework/aptos-stdlib/sources/pool_u64.move +++ b/aptos-move/framework/aptos-stdlib/sources/pool_u64.move @@ -17,6 +17,7 @@ module aptos_std::pool_u64 { use aptos_std::simple_map::{Self, SimpleMap}; use std::error; use std::vector; + use aptos_std::math64; /// Shareholder not present in pool. const ESHAREHOLDER_NOT_FOUND: u64 = 1; @@ -72,8 +73,8 @@ module aptos_std::pool_u64 { } /// Destroy an empty pool. This will fail if the pool has any balance of coins. 
- public fun destroy_empty(pool: Pool) { - assert!(pool.total_coins == 0, error::invalid_state(EPOOL_IS_NOT_EMPTY)); + public fun destroy_empty(self: Pool) { + assert!(self.total_coins == 0, error::invalid_state(EPOOL_IS_NOT_EMPTY)); let Pool { shareholders_limit: _, total_coins: _, @@ -81,73 +82,73 @@ module aptos_std::pool_u64 { shares: _, shareholders: _, scaling_factor: _, - } = pool; + } = self; } - /// Return `pool`'s total balance of coins. - public fun total_coins(pool: &Pool): u64 { - pool.total_coins + /// Return `self`'s total balance of coins. + public fun total_coins(self: &Pool): u64 { + self.total_coins } - /// Return the total number of shares across all shareholders in `pool`. - public fun total_shares(pool: &Pool): u64 { - pool.total_shares + /// Return the total number of shares across all shareholders in `self`. + public fun total_shares(self: &Pool): u64 { + self.total_shares } - /// Return true if `shareholder` is in `pool`. - public fun contains(pool: &Pool, shareholder: address): bool { - simple_map::contains_key(&pool.shares, &shareholder) + /// Return true if `shareholder` is in `self`. + public fun contains(self: &Pool, shareholder: address): bool { + self.shares.contains_key(&shareholder) } - /// Return the number of shares of `stakeholder` in `pool`. - public fun shares(pool: &Pool, shareholder: address): u64 { - if (contains(pool, shareholder)) { - *simple_map::borrow(&pool.shares, &shareholder) + /// Return the number of shares of `stakeholder` in `self`. + public fun shares(self: &Pool, shareholder: address): u64 { + if (self.contains(shareholder)) { + *self.shares.borrow(&shareholder) } else { 0 } } - /// Return the balance in coins of `shareholder` in `pool.` - public fun balance(pool: &Pool, shareholder: address): u64 { - let num_shares = shares(pool, shareholder); - shares_to_amount(pool, num_shares) + /// Return the balance in coins of `shareholder` in `self`. + public fun balance(self: &Pool, shareholder: address): u64 { + let num_shares = self.shares(shareholder); + self.shares_to_amount(num_shares) } - /// Return the list of shareholders in `pool`. - public fun shareholders(pool: &Pool): vector
<address> { - pool.shareholders + /// Return the list of shareholders in `self`. + public fun shareholders(self: &Pool): vector<address>
{ + self.shareholders } - /// Return the number of shareholders in `pool`. - public fun shareholders_count(pool: &Pool): u64 { - vector::length(&pool.shareholders) + /// Return the number of shareholders in `self`. + public fun shareholders_count(self: &Pool): u64 { + self.shareholders.length() } - /// Update `pool`'s total balance of coins. - public fun update_total_coins(pool: &mut Pool, new_total_coins: u64) { - pool.total_coins = new_total_coins; + /// Update `self`'s total balance of coins. + public fun update_total_coins(self: &mut Pool, new_total_coins: u64) { + self.total_coins = new_total_coins; } /// Allow an existing or new shareholder to add their coins to the pool in exchange for new shares. - public fun buy_in(pool: &mut Pool, shareholder: address, coins_amount: u64): u64 { + public fun buy_in(self: &mut Pool, shareholder: address, coins_amount: u64): u64 { if (coins_amount == 0) return 0; - let new_shares = amount_to_shares(pool, coins_amount); - assert!(MAX_U64 - pool.total_coins >= coins_amount, error::invalid_argument(EPOOL_TOTAL_COINS_OVERFLOW)); - assert!(MAX_U64 - pool.total_shares >= new_shares, error::invalid_argument(EPOOL_TOTAL_COINS_OVERFLOW)); + let new_shares = self.amount_to_shares(coins_amount); + assert!(MAX_U64 - self.total_coins >= coins_amount, error::invalid_argument(EPOOL_TOTAL_COINS_OVERFLOW)); + assert!(MAX_U64 - self.total_shares >= new_shares, error::invalid_argument(EPOOL_TOTAL_COINS_OVERFLOW)); - pool.total_coins = pool.total_coins + coins_amount; - pool.total_shares = pool.total_shares + new_shares; - add_shares(pool, shareholder, new_shares); + self.total_coins += coins_amount; + self.total_shares += new_shares; + self.add_shares(shareholder, new_shares); new_shares } - /// Add the number of shares directly for `shareholder` in `pool`. + /// Add the number of shares directly for `shareholder` in `self`. /// This would dilute other shareholders if the pool's balance of coins didn't change. - fun add_shares(pool: &mut Pool, shareholder: address, new_shares: u64): u64 { - if (contains(pool, shareholder)) { - let existing_shares = simple_map::borrow_mut(&mut pool.shares, &shareholder); + fun add_shares(self: &mut Pool, shareholder: address, new_shares: u64): u64 { + if (self.contains(shareholder)) { + let existing_shares = self.shares.borrow_mut(&shareholder); let current_shares = *existing_shares; assert!(MAX_U64 - current_shares >= new_shares, error::invalid_argument(ESHAREHOLDER_SHARES_OVERFLOW)); @@ -155,120 +156,115 @@ module aptos_std::pool_u64 { *existing_shares } else if (new_shares > 0) { assert!( - vector::length(&pool.shareholders) < pool.shareholders_limit, + self.shareholders.length() < self.shareholders_limit, error::invalid_state(ETOO_MANY_SHAREHOLDERS), ); - vector::push_back(&mut pool.shareholders, shareholder); - simple_map::add(&mut pool.shares, shareholder, new_shares); + self.shareholders.push_back(shareholder); + self.shares.add(shareholder, new_shares); new_shares } else { new_shares } } - /// Allow `shareholder` to redeem their shares in `pool` for coins. - public fun redeem_shares(pool: &mut Pool, shareholder: address, shares_to_redeem: u64): u64 { - assert!(contains(pool, shareholder), error::invalid_argument(ESHAREHOLDER_NOT_FOUND)); - assert!(shares(pool, shareholder) >= shares_to_redeem, error::invalid_argument(EINSUFFICIENT_SHARES)); + /// Allow `shareholder` to redeem their shares in `self` for coins. 
+ public fun redeem_shares(self: &mut Pool, shareholder: address, shares_to_redeem: u64): u64 { + assert!(self.contains(shareholder), error::invalid_argument(ESHAREHOLDER_NOT_FOUND)); + assert!(self.shares(shareholder) >= shares_to_redeem, error::invalid_argument(EINSUFFICIENT_SHARES)); if (shares_to_redeem == 0) return 0; - let redeemed_coins = shares_to_amount(pool, shares_to_redeem); - pool.total_coins = pool.total_coins - redeemed_coins; - pool.total_shares = pool.total_shares - shares_to_redeem; - deduct_shares(pool, shareholder, shares_to_redeem); + let redeemed_coins = self.shares_to_amount(shares_to_redeem); + self.total_coins -= redeemed_coins; + self.total_shares -= shares_to_redeem; + self.deduct_shares(shareholder, shares_to_redeem); redeemed_coins } /// Transfer shares from `shareholder_1` to `shareholder_2`. public fun transfer_shares( - pool: &mut Pool, + self: &mut Pool, shareholder_1: address, shareholder_2: address, shares_to_transfer: u64, ) { - assert!(contains(pool, shareholder_1), error::invalid_argument(ESHAREHOLDER_NOT_FOUND)); - assert!(shares(pool, shareholder_1) >= shares_to_transfer, error::invalid_argument(EINSUFFICIENT_SHARES)); + assert!(self.contains(shareholder_1), error::invalid_argument(ESHAREHOLDER_NOT_FOUND)); + assert!(self.shares(shareholder_1) >= shares_to_transfer, error::invalid_argument(EINSUFFICIENT_SHARES)); if (shares_to_transfer == 0) return; - deduct_shares(pool, shareholder_1, shares_to_transfer); - add_shares(pool, shareholder_2, shares_to_transfer); + self.deduct_shares(shareholder_1, shares_to_transfer); + self.add_shares(shareholder_2, shares_to_transfer); } - /// Directly deduct `shareholder`'s number of shares in `pool` and return the number of remaining shares. - fun deduct_shares(pool: &mut Pool, shareholder: address, num_shares: u64): u64 { - assert!(contains(pool, shareholder), error::invalid_argument(ESHAREHOLDER_NOT_FOUND)); - assert!(shares(pool, shareholder) >= num_shares, error::invalid_argument(EINSUFFICIENT_SHARES)); + /// Directly deduct `shareholder`'s number of shares in `self` and return the number of remaining shares. + fun deduct_shares(self: &mut Pool, shareholder: address, num_shares: u64): u64 { + assert!(self.contains(shareholder), error::invalid_argument(ESHAREHOLDER_NOT_FOUND)); + assert!(self.shares(shareholder) >= num_shares, error::invalid_argument(EINSUFFICIENT_SHARES)); - let existing_shares = simple_map::borrow_mut(&mut pool.shares, &shareholder); - *existing_shares = *existing_shares - num_shares; + let existing_shares = self.shares.borrow_mut(&shareholder); + *existing_shares -= num_shares; // Remove the shareholder completely if they have no shares left. let remaining_shares = *existing_shares; if (remaining_shares == 0) { - let (_, shareholder_index) = vector::index_of(&pool.shareholders, &shareholder); - vector::remove(&mut pool.shareholders, shareholder_index); - simple_map::remove(&mut pool.shares, &shareholder); + let (_, shareholder_index) = self.shareholders.index_of(&shareholder); + self.shareholders.remove(shareholder_index); + self.shares.remove(&shareholder); }; remaining_shares } - /// Return the number of new shares `coins_amount` can buy in `pool`. + /// Return the number of new shares `coins_amount` can buy in `self`. /// `amount` needs to big enough to avoid rounding number. 
- public fun amount_to_shares(pool: &Pool, coins_amount: u64): u64 { - amount_to_shares_with_total_coins(pool, coins_amount, pool.total_coins) + public fun amount_to_shares(self: &Pool, coins_amount: u64): u64 { + self.amount_to_shares_with_total_coins(coins_amount, self.total_coins) } - /// Return the number of new shares `coins_amount` can buy in `pool` with a custom total coins number. + /// Return the number of new shares `coins_amount` can buy in `self` with a custom total coins number. /// `amount` needs to big enough to avoid rounding number. - public fun amount_to_shares_with_total_coins(pool: &Pool, coins_amount: u64, total_coins: u64): u64 { + public fun amount_to_shares_with_total_coins(self: &Pool, coins_amount: u64, total_coins: u64): u64 { // No shares yet so amount is worth the same number of shares. - if (pool.total_coins == 0 || pool.total_shares == 0) { + if (self.total_coins == 0 || self.total_shares == 0) { // Multiply by scaling factor to minimize rounding errors during internal calculations for buy ins/redeems. // This can overflow but scaling factor is expected to be chosen carefully so this would not overflow. - coins_amount * pool.scaling_factor + coins_amount * self.scaling_factor } else { // Shares price = total_coins / total existing shares. // New number of shares = new_amount / shares_price = new_amount * existing_shares / total_amount. // We rearrange the calc and do multiplication first to avoid rounding errors. - multiply_then_divide(pool, coins_amount, pool.total_shares, total_coins) + self.multiply_then_divide(coins_amount, self.total_shares, total_coins) } } - /// Return the number of coins `shares` are worth in `pool`. + /// Return the number of coins `shares` are worth in `self`. /// `shares` needs to big enough to avoid rounding number. - public fun shares_to_amount(pool: &Pool, shares: u64): u64 { - shares_to_amount_with_total_coins(pool, shares, pool.total_coins) + public fun shares_to_amount(self: &Pool, shares: u64): u64 { + self.shares_to_amount_with_total_coins(shares, self.total_coins) } - /// Return the number of coins `shares` are worth in `pool` with a custom total coins number. + /// Return the number of coins `shares` are worth in `self` with a custom total coins number. /// `shares` needs to big enough to avoid rounding number. - public fun shares_to_amount_with_total_coins(pool: &Pool, shares: u64, total_coins: u64): u64 { + public fun shares_to_amount_with_total_coins(self: &Pool, shares: u64, total_coins: u64): u64 { // No shares or coins yet so shares are worthless. - if (pool.total_coins == 0 || pool.total_shares == 0) { + if (self.total_coins == 0 || self.total_shares == 0) { 0 } else { // Shares price = total_coins / total existing shares. // Shares worth = shares * shares price = shares * total_coins / total existing shares. // We rearrange the calc and do multiplication first to avoid rounding errors. 
- multiply_then_divide(pool, shares, total_coins, pool.total_shares) + self.multiply_then_divide(shares, total_coins, self.total_shares) } } - public fun multiply_then_divide(_pool: &Pool, x: u64, y: u64, z: u64): u64 { - let result = (to_u128(x) * to_u128(y)) / to_u128(z); - (result as u64) - } - - fun to_u128(num: u64): u128 { - (num as u128) + public fun multiply_then_divide(self: &Pool, x: u64, y: u64, z: u64): u64 { + math64::mul_div(x, y, z) } #[test_only] - public fun destroy_pool(pool: Pool) { + public fun destroy_pool(self: Pool) { let Pool { shareholders_limit: _, total_coins: _, @@ -276,7 +272,7 @@ module aptos_std::pool_u64 { shares: _, shareholders: _, scaling_factor: _, - } = pool; + } = self; } #[test] @@ -284,76 +280,76 @@ module aptos_std::pool_u64 { let pool = new(5); // Shareholders 1 and 2 buy in first. - buy_in(&mut pool, @1, 1000); - buy_in(&mut pool, @2, 2000); - assert!(shareholders_count(&pool) == 2, 0); - assert!(total_coins(&pool) == 3000, 1); - assert!(total_shares(&pool) == 3000, 2); - assert!(shares(&pool, @1) == 1000, 3); - assert!(shares(&pool, @2) == 2000, 4); - assert!(balance(&pool, @1) == 1000, 5); - assert!(balance(&pool, @2) == 2000, 6); + pool.buy_in(@1, 1000); + pool.buy_in(@2, 2000); + assert!(pool.shareholders_count() == 2, 0); + assert!(pool.total_coins() == 3000, 1); + assert!(pool.total_shares() == 3000, 2); + assert!(pool.shares(@1) == 1000, 3); + assert!(pool.shares(@2) == 2000, 4); + assert!(pool.balance(@1) == 1000, 5); + assert!(pool.balance(@2) == 2000, 6); // Pool increases in value. - update_total_coins(&mut pool, 5000); - assert!(shares(&pool, @1) == 1000, 7); - assert!(shares(&pool, @2) == 2000, 8); + pool.update_total_coins(5000); + assert!(pool.shares(@1) == 1000, 7); + assert!(pool.shares(@2) == 2000, 8); let expected_balance_1 = 1000 * 5000 / 3000; - assert!(balance(&pool, @1) == expected_balance_1, 9); + assert!(pool.balance(@1) == expected_balance_1, 9); let expected_balance_2 = 2000 * 5000 / 3000; - assert!(balance(&pool, @2) == expected_balance_2, 10); + assert!(pool.balance(@2) == expected_balance_2, 10); // Shareholder 3 buys in into the 5000-coin pool with 1000 coins. There are 3000 existing shares. let expected_shares = 1000 * 3000 / 5000; - buy_in(&mut pool, @3, 1000); - assert!(shares(&pool, @3) == expected_shares, 11); - assert!(balance(&pool, @3) == 1000, 12); + pool.buy_in(@3, 1000); + assert!(pool.shares(@3) == expected_shares, 11); + assert!(pool.balance(@3) == 1000, 12); // Pool increases more in value. - update_total_coins(&mut pool, 8000); + pool.update_total_coins(8000); // Shareholders 1 and 2 redeem. let all_shares = 3000 + expected_shares; - assert!(total_shares(&pool) == all_shares, 13); + assert!(pool.total_shares() == all_shares, 13); let expected_value_per_500_shares = 500 * 8000 / all_shares; - assert!(redeem_shares(&mut pool, @1, 500) == expected_value_per_500_shares, 14); - assert!(redeem_shares(&mut pool, @1, 500) == expected_value_per_500_shares, 15); - assert!(redeem_shares(&mut pool, @2, 2000) == expected_value_per_500_shares * 4, 16); + assert!(pool.redeem_shares(@1, 500) == expected_value_per_500_shares, 14); + assert!(pool.redeem_shares(@1, 500) == expected_value_per_500_shares, 15); + assert!(pool.redeem_shares(@2, 2000) == expected_value_per_500_shares * 4, 16); // Due to a very small rounding error of 1, shareholder 3 actually has 1 more coin. 
let shareholder_3_balance = expected_value_per_500_shares * 6 / 5 + 1; - assert!(balance(&pool, @3) == shareholder_3_balance, 17); - assert!(total_coins(&pool) == shareholder_3_balance, 18); - assert!(shareholders_count(&pool) == 1, 19); - let num_shares_3 = shares(&pool, @3); - assert!(redeem_shares(&mut pool, @3, num_shares_3) == shareholder_3_balance, 20); + assert!(pool.balance(@3) == shareholder_3_balance, 17); + assert!(pool.total_coins() == shareholder_3_balance, 18); + assert!(pool.shareholders_count() == 1, 19); + let num_shares_3 = pool.shares(@3); + assert!(pool.redeem_shares(@3, num_shares_3) == shareholder_3_balance, 20); // Nothing left. - assert!(shareholders_count(&pool) == 0, 21); - destroy_empty(pool); + assert!(pool.shareholders_count() == 0, 21); + pool.destroy_empty(); } #[test] #[expected_failure(abort_code = 196611, location = Self)] public entry fun test_destroy_empty_should_fail_if_not_empty() { let pool = new(1); - update_total_coins(&mut pool, 100); - destroy_empty(pool); + pool.update_total_coins(100); + pool.destroy_empty(); } #[test] public entry fun test_buy_in_and_redeem_large_numbers() { let pool = new(2); let half_max_u64 = MAX_U64 / 2; - let shares_1 = buy_in(&mut pool, @1, half_max_u64); + let shares_1 = pool.buy_in(@1, half_max_u64); assert!(shares_1 == half_max_u64, 0); - let shares_2 = buy_in(&mut pool, @2, half_max_u64 + 1); + let shares_2 = pool.buy_in(@2, half_max_u64 + 1); assert!(shares_2 == half_max_u64 + 1, 1); - assert!(total_shares(&pool) == MAX_U64, 2); - assert!(total_coins(&pool) == MAX_U64, 3); - assert!(redeem_shares(&mut pool, @1, shares_1) == half_max_u64, 4); - assert!(redeem_shares(&mut pool, @2, shares_2) == half_max_u64 + 1, 5); - destroy_empty(pool); + assert!(pool.total_shares() == MAX_U64, 2); + assert!(pool.total_coins() == MAX_U64, 3); + assert!(pool.redeem_shares(@1, shares_1) == half_max_u64, 4); + assert!(pool.redeem_shares(@2, shares_2) == half_max_u64 + 1, 5); + pool.destroy_empty(); } #[test] @@ -361,211 +357,211 @@ module aptos_std::pool_u64 { let scaling_factor = 100; let pool = create_with_scaling_factor(2, scaling_factor); let coins_amount = MAX_U64 / 100; - let shares = buy_in(&mut pool, @1, coins_amount); - assert!(total_shares(&pool) == coins_amount * scaling_factor, 0); - assert!(total_coins(&pool) == coins_amount, 1); - assert!(redeem_shares(&mut pool, @1, shares) == coins_amount, 2); - destroy_empty(pool); + let shares = pool.buy_in(@1, coins_amount); + assert!(pool.total_shares() == coins_amount * scaling_factor, 0); + assert!(pool.total_coins() == coins_amount, 1); + assert!(pool.redeem_shares(@1, shares) == coins_amount, 2); + pool.destroy_empty(); } #[test] public entry fun test_buy_in_zero_amount() { let pool = new(2); - buy_in(&mut pool, @1, 100); - assert!(buy_in(&mut pool, @2, 0) == 0, 0); - assert!(total_shares(&pool) == shares(&pool, @1), 1); - assert!(shareholders_count(&pool) == 1, 2); - destroy_pool(pool); + pool.buy_in(@1, 100); + assert!(pool.buy_in(@2, 0) == 0, 0); + assert!(pool.total_shares() == pool.shares(@1), 1); + assert!(pool.shareholders_count() == 1, 2); + pool.destroy_pool(); } #[test] public entry fun test_buy_in_with_small_coins_amount() { let pool = new(2); // Shareholder 1 buys in with 1e17 coins. - buy_in(&mut pool, @1, 100000000000000000); + pool.buy_in(@1, 100000000000000000); // Shareholder 2 buys in with a very small amount. - assert!(buy_in(&mut pool, @2, 1) == 1, 0); + assert!(pool.buy_in(@2, 1) == 1, 0); // Pool's total coins increases by 20%. 
Shareholder 2 shouldn't see any actual balance increase as it gets // rounded down. - let total_coins = total_coins(&pool); - update_total_coins(&mut pool, total_coins * 6 / 5); + let total_coins = pool.total_coins(); + pool.update_total_coins(total_coins * 6 / 5); // Minus 1 due to rounding error. - assert!(balance(&pool, @1) == 100000000000000000 * 6 / 5 - 1, 1); - assert!(balance(&pool, @2) == 1, 2); - destroy_pool(pool); + assert!(pool.balance(@1) == 100000000000000000 * 6 / 5 - 1, 1); + assert!(pool.balance(@2) == 1, 2); + pool.destroy_pool(); } #[test] public entry fun test_add_zero_shares_should_not_add_shareholder() { let pool = new(1); - update_total_coins(&mut pool, 1000); - assert!(add_shares(&mut pool, @1, 0) == 0, 0); - assert!(shareholders_count(&pool) == 0, 1); - destroy_pool(pool); + pool.update_total_coins(1000); + assert!(pool.add_shares(@1, 0) == 0, 0); + assert!(pool.shareholders_count() == 0, 1); + pool.destroy_pool(); } #[test] public entry fun test_add_zero_shares_returns_existing_number_of_shares() { let pool = new(1); - update_total_coins(&mut pool, 1000); - add_shares(&mut pool, @1, 1); - assert!(shares(&pool, @1) == add_shares(&mut pool, @1, 0), 0); - destroy_pool(pool); + pool.update_total_coins(1000); + pool.add_shares(@1, 1); + assert!(pool.shares(@1) == pool.add_shares(@1, 0), 0); + pool.destroy_pool(); } #[test] public entry fun test_add_shares_existing_shareholder() { let pool = new(1); - update_total_coins(&mut pool, 1000); - add_shares(&mut pool, @1, 1); - add_shares(&mut pool, @1, 2); - assert!(shares(&mut pool, @1) == 3, 0); - destroy_pool(pool); + pool.update_total_coins(1000); + pool.add_shares(@1, 1); + pool.add_shares(@1, 2); + assert!(pool.shares(@1) == 3, 0); + pool.destroy_pool(); } #[test] public entry fun test_add_shares_new_shareholder() { let pool = new(2); - update_total_coins(&mut pool, 1000); - add_shares(&mut pool, @1, 1); - add_shares(&mut pool, @2, 2); - assert!(shares(&mut pool, @1) == 1, 0); - assert!(shares(&mut pool, @2) == 2, 1); - destroy_pool(pool); + pool.update_total_coins(1000); + pool.add_shares(@1, 1); + pool.add_shares(@2, 2); + assert!(pool.shares(@1) == 1, 0); + assert!(pool.shares(@2) == 2, 1); + pool.destroy_pool(); } #[test] #[expected_failure(abort_code = 196610, location = Self)] public entry fun test_add_shares_should_enforce_shareholder_limit() { let pool = new(2); - add_shares(&mut pool, @1, 1); - add_shares(&mut pool, @2, 2); - add_shares(&mut pool, @3, 2); - destroy_pool(pool); + pool.add_shares(@1, 1); + pool.add_shares(@2, 2); + pool.add_shares(@3, 2); + pool.destroy_pool(); } #[test] public entry fun test_add_shares_should_work_after_reducing_shareholders_below_limit() { let pool = new(3); - add_shares(&mut pool, @1, 1); - add_shares(&mut pool, @2, 2); - deduct_shares(&mut pool, @2, 2); - add_shares(&mut pool, @3, 3); - assert!(shares(&pool, @3) == 3, 0); - destroy_pool(pool); + pool.add_shares(@1, 1); + pool.add_shares(@2, 2); + pool.deduct_shares(@2, 2); + pool.add_shares(@3, 3); + assert!(pool.shares(@3) == 3, 0); + pool.destroy_pool(); } #[test] #[expected_failure(abort_code = 65537, location = Self)] public entry fun test_redeem_shares_non_existent_shareholder() { let pool = new(1); - add_shares(&mut pool, @1, 1); - redeem_shares(&mut pool, @2, 1); - destroy_pool(pool); + pool.add_shares(@1, 1); + pool.redeem_shares(@2, 1); + pool.destroy_pool(); } #[test] #[expected_failure(abort_code = 65540, location = Self)] public entry fun test_redeem_shares_insufficient_shares() { let pool = new(1); - 
add_shares(&mut pool, @1, 1); - redeem_shares(&mut pool, @1, 2); - destroy_pool(pool); + pool.add_shares(@1, 1); + pool.redeem_shares(@1, 2); + pool.destroy_pool(); } #[test] public entry fun test_redeem_small_number_of_shares() { let pool = new(2); // 1e17 shares and coins. - buy_in(&mut pool, @1, 100000000000000000); - buy_in(&mut pool, @2, 100000000000000000); - assert!(redeem_shares(&mut pool, @1, 1) == 1, 0); - destroy_pool(pool); + pool.buy_in(@1, 100000000000000000); + pool.buy_in(@2, 100000000000000000); + assert!(pool.redeem_shares(@1, 1) == 1, 0); + pool.destroy_pool(); } #[test] public entry fun test_redeem_zero_shares() { let pool = new(2); - buy_in(&mut pool, @1, 1); - assert!(redeem_shares(&mut pool, @1, 0) == 0, 0); - assert!(shares(&pool, @1) == 1, 1); - assert!(total_coins(&pool) == 1, 2); - assert!(total_shares(&pool) == 1, 3); - destroy_pool(pool); + pool.buy_in(@1, 1); + assert!(pool.redeem_shares(@1, 0) == 0, 0); + assert!(pool.shares(@1) == 1, 1); + assert!(pool.total_coins() == 1, 2); + assert!(pool.total_shares() == 1, 3); + pool.destroy_pool(); } #[test] #[expected_failure(abort_code = 65537, location = Self)] public entry fun test_deduct_shares_non_existent_shareholder() { let pool = new(1); - add_shares(&mut pool, @1, 1); - deduct_shares(&mut pool, @2, 1); - destroy_pool(pool); + pool.add_shares(@1, 1); + pool.deduct_shares(@2, 1); + pool.destroy_pool(); } #[test] #[expected_failure(abort_code = 65540, location = Self)] public entry fun test_deduct_shares_insufficient_shares() { let pool = new(1); - add_shares(&mut pool, @1, 1); - deduct_shares(&mut pool, @1, 2); - destroy_pool(pool); + pool.add_shares(@1, 1); + pool.deduct_shares(@1, 2); + pool.destroy_pool(); } #[test] public entry fun test_deduct_shares_remove_shareholder_with_no_shares() { let pool = new(2); - add_shares(&mut pool, @1, 1); - add_shares(&mut pool, @2, 2); - assert!(shareholders_count(&pool) == 2, 0); - deduct_shares(&mut pool, @1, 1); - assert!(shareholders_count(&pool) == 1, 1); - destroy_pool(pool); + pool.add_shares(@1, 1); + pool.add_shares(@2, 2); + assert!(pool.shareholders_count() == 2, 0); + pool.deduct_shares(@1, 1); + assert!(pool.shareholders_count() == 1, 1); + pool.destroy_pool(); } #[test] public entry fun test_transfer_shares() { let pool = new(2); - add_shares(&mut pool, @1, 2); - add_shares(&mut pool, @2, 2); - assert!(shareholders_count(&pool) == 2, 0); - transfer_shares(&mut pool, @1, @2, 1); - assert!(shares(&pool, @1) == 1, 0); - assert!(shares(&pool, @2) == 3, 0); - destroy_pool(pool); + pool.add_shares(@1, 2); + pool.add_shares(@2, 2); + assert!(pool.shareholders_count() == 2, 0); + pool.transfer_shares(@1, @2, 1); + assert!(pool.shares(@1) == 1, 0); + assert!(pool.shares(@2) == 3, 0); + pool.destroy_pool(); } #[test] public entry fun test_amount_to_shares_empty_pool() { let pool = new(1); // No total shares and total coins. - assert!(amount_to_shares(&pool, 1000) == 1000, 0); + assert!(pool.amount_to_shares(1000) == 1000, 0); // No total shares but some total coins. - update_total_coins(&mut pool, 1000); - assert!(amount_to_shares(&pool, 1000) == 1000, 1); + pool.update_total_coins(1000); + assert!(pool.amount_to_shares(1000) == 1000, 1); // No total coins but some total shares. 
- update_total_coins(&mut pool, 0); - add_shares(&mut pool, @1, 100); - assert!(amount_to_shares(&pool, 1000) == 1000, 2); - destroy_pool(pool); + pool.update_total_coins(0); + pool.add_shares(@1, 100); + assert!(pool.amount_to_shares(1000) == 1000, 2); + pool.destroy_pool(); } #[test] public entry fun test_shares_to_amount_empty_pool() { let pool = new(1); // No total shares and total coins. - assert!(shares_to_amount(&pool, 1000) == 0, 0); + assert!(pool.shares_to_amount(1000) == 0, 0); // No total shares but some total coins. - update_total_coins(&mut pool, 1000); - assert!(shares_to_amount(&pool, 1000) == 0, 1); + pool.update_total_coins(1000); + assert!(pool.shares_to_amount(1000) == 0, 1); // No total coins but some total shares. - update_total_coins(&mut pool, 0); - add_shares(&mut pool, @1, 100); - assert!(shares_to_amount(&pool, 1000) == 0, 2); - destroy_pool(pool); + pool.update_total_coins(0); + pool.add_shares(@1, 100); + assert!(pool.shares_to_amount(1000) == 0, 2); + pool.destroy_pool(); } } diff --git a/aptos-move/framework/aptos-stdlib/sources/pool_u64.spec.move b/aptos-move/framework/aptos-stdlib/sources/pool_u64.spec.move index 96e77747769e4..67f934dc9445a 100644 --- a/aptos-move/framework/aptos-stdlib/sources/pool_u64.spec.move +++ b/aptos-move/framework/aptos-stdlib/sources/pool_u64.spec.move @@ -30,9 +30,9 @@ spec aptos_std::pool_u64 { simple_map::spec_contains_key(pool.shares, shareholder) } - spec contains(pool: &Pool, shareholder: address): bool { + spec contains(self: &Pool, shareholder: address): bool { aborts_if false; - ensures result == spec_contains(pool, shareholder); + ensures result == spec_contains(self, shareholder); } spec fun spec_shares(pool: Pool, shareholder: address): u64 { @@ -44,62 +44,62 @@ spec aptos_std::pool_u64 { } } - spec shares(pool: &Pool, shareholder: address): u64 { + spec shares(self: &Pool, shareholder: address): u64 { aborts_if false; - ensures result == spec_shares(pool, shareholder); + ensures result == spec_shares(self, shareholder); } - spec balance(pool: &Pool, shareholder: address): u64 { - let shares = spec_shares(pool, shareholder); - let total_coins = pool.total_coins; - aborts_if pool.total_coins > 0 && pool.total_shares > 0 && (shares * total_coins) / pool.total_shares > MAX_U64; - ensures result == spec_shares_to_amount_with_total_coins(pool, shares, total_coins); + spec balance(self: &Pool, shareholder: address): u64 { + let shares = spec_shares(self, shareholder); + let total_coins = self.total_coins; + aborts_if self.total_coins > 0 && self.total_shares > 0 && (shares * total_coins) / self.total_shares > MAX_U64; + ensures result == spec_shares_to_amount_with_total_coins(self, shares, total_coins); } - spec buy_in(pool: &mut Pool, shareholder: address, coins_amount: u64): u64 { - let new_shares = spec_amount_to_shares_with_total_coins(pool, coins_amount, pool.total_coins); - aborts_if pool.total_coins + coins_amount > MAX_U64; - aborts_if pool.total_shares + new_shares > MAX_U64; - include coins_amount > 0 ==> AddSharesAbortsIf { new_shares: new_shares }; - include coins_amount > 0 ==> AddSharesEnsures { new_shares: new_shares }; - ensures pool.total_coins == old(pool.total_coins) + coins_amount; - ensures pool.total_shares == old(pool.total_shares) + new_shares; + spec buy_in(self: &mut Pool, shareholder: address, coins_amount: u64): u64 { + let new_shares = spec_amount_to_shares_with_total_coins(self, coins_amount, self.total_coins); + aborts_if self.total_coins + coins_amount > MAX_U64; + aborts_if self.total_shares 
+ new_shares > MAX_U64; + include coins_amount > 0 ==> AddSharesAbortsIf { new_shares }; + include coins_amount > 0 ==> AddSharesEnsures { new_shares }; + ensures self.total_coins == old(self.total_coins) + coins_amount; + ensures self.total_shares == old(self.total_shares) + new_shares; ensures result == new_shares; } - spec add_shares(pool: &mut Pool, shareholder: address, new_shares: u64): u64 { + spec add_shares(self: &mut Pool, shareholder: address, new_shares: u64): u64 { include AddSharesAbortsIf; include AddSharesEnsures; - let key_exists = simple_map::spec_contains_key(pool.shares, shareholder); - ensures result == if (key_exists) { simple_map::spec_get(pool.shares, shareholder) } + let key_exists = simple_map::spec_contains_key(self.shares, shareholder); + ensures result == if (key_exists) { simple_map::spec_get(self.shares, shareholder) } else { new_shares }; } spec schema AddSharesAbortsIf { - pool: Pool; + self: Pool; shareholder: address; new_shares: u64; - let key_exists = simple_map::spec_contains_key(pool.shares, shareholder); - let current_shares = simple_map::spec_get(pool.shares, shareholder); + let key_exists = simple_map::spec_contains_key(self.shares, shareholder); + let current_shares = simple_map::spec_get(self.shares, shareholder); aborts_if key_exists && current_shares + new_shares > MAX_U64; - aborts_if !key_exists && new_shares > 0 && len(pool.shareholders) >= pool.shareholders_limit; + aborts_if !key_exists && new_shares > 0 && len(self.shareholders) >= self.shareholders_limit; } spec schema AddSharesEnsures { - pool: Pool; + self: Pool; shareholder: address; new_shares: u64; - let key_exists = simple_map::spec_contains_key(pool.shares, shareholder); - let current_shares = simple_map::spec_get(pool.shares, shareholder); + let key_exists = simple_map::spec_contains_key(self.shares, shareholder); + let current_shares = simple_map::spec_get(self.shares, shareholder); ensures key_exists ==> - pool.shares == simple_map::spec_set(old(pool.shares), shareholder, current_shares + new_shares); + self.shares == simple_map::spec_set(old(self.shares), shareholder, current_shares + new_shares); ensures (!key_exists && new_shares > 0) ==> - pool.shares == simple_map::spec_set(old(pool.shares), shareholder, new_shares); + self.shares == simple_map::spec_set(old(self.shares), shareholder, new_shares); ensures (!key_exists && new_shares > 0) ==> - vector::eq_push_back(pool.shareholders, old(pool.shareholders), shareholder); + vector::eq_push_back(self.shareholders, old(self.shareholders), shareholder); } spec fun spec_amount_to_shares_with_total_coins(pool: Pool, coins_amount: u64, total_coins: u64): u64 { @@ -111,19 +111,19 @@ spec aptos_std::pool_u64 { } } - spec amount_to_shares_with_total_coins(pool: &Pool, coins_amount: u64, total_coins: u64): u64 { - aborts_if pool.total_coins > 0 && pool.total_shares > 0 - && (coins_amount * pool.total_shares) / total_coins > MAX_U64; - aborts_if (pool.total_coins == 0 || pool.total_shares == 0) - && coins_amount * pool.scaling_factor > MAX_U64; - aborts_if pool.total_coins > 0 && pool.total_shares > 0 && total_coins == 0; - ensures result == spec_amount_to_shares_with_total_coins(pool, coins_amount, total_coins); + spec amount_to_shares_with_total_coins(self: &Pool, coins_amount: u64, total_coins: u64): u64 { + aborts_if self.total_coins > 0 && self.total_shares > 0 + && (coins_amount * self.total_shares) / total_coins > MAX_U64; + aborts_if (self.total_coins == 0 || self.total_shares == 0) + && coins_amount * self.scaling_factor > 
MAX_U64; + aborts_if self.total_coins > 0 && self.total_shares > 0 && total_coins == 0; + ensures result == spec_amount_to_shares_with_total_coins(self, coins_amount, total_coins); } - spec shares_to_amount_with_total_coins(pool: &Pool, shares: u64, total_coins: u64): u64 { - aborts_if pool.total_coins > 0 && pool.total_shares > 0 - && (shares * total_coins) / pool.total_shares > MAX_U64; - ensures result == spec_shares_to_amount_with_total_coins(pool, shares, total_coins); + spec shares_to_amount_with_total_coins(self: &Pool, shares: u64, total_coins: u64): u64 { + aborts_if self.total_coins > 0 && self.total_shares > 0 + && (shares * total_coins) / self.total_shares > MAX_U64; + ensures result == spec_shares_to_amount_with_total_coins(self, shares, total_coins); } spec fun spec_shares_to_amount_with_total_coins(pool: Pool, shares: u64, total_coins: u64): u64 { @@ -135,52 +135,54 @@ spec aptos_std::pool_u64 { } } - spec multiply_then_divide(_pool: &Pool, x: u64, y: u64, z: u64): u64 { + spec multiply_then_divide(self: &Pool, x: u64, y: u64, z: u64): u64 { aborts_if z == 0; aborts_if (x * y) / z > MAX_U64; ensures result == (x * y) / z; } - spec redeem_shares(pool: &mut Pool, shareholder: address, shares_to_redeem: u64): u64 { - let redeemed_coins = spec_shares_to_amount_with_total_coins(pool, shares_to_redeem, pool.total_coins); - aborts_if !spec_contains(pool, shareholder); - aborts_if spec_shares(pool, shareholder) < shares_to_redeem; - aborts_if pool.total_coins < redeemed_coins; - aborts_if pool.total_shares < shares_to_redeem; - ensures pool.total_coins == old(pool.total_coins) - redeemed_coins; - ensures pool.total_shares == old(pool.total_shares) - shares_to_redeem; - include shares_to_redeem > 0 ==> DeductSharesEnsures { num_shares: shares_to_redeem }; + spec redeem_shares(self: &mut Pool, shareholder: address, shares_to_redeem: u64): u64 { + let redeemed_coins = spec_shares_to_amount_with_total_coins(self, shares_to_redeem, self.total_coins); + aborts_if !spec_contains(self, shareholder); + aborts_if spec_shares(self, shareholder) < shares_to_redeem; + aborts_if self.total_coins < redeemed_coins; + aborts_if self.total_shares < shares_to_redeem; + ensures self.total_coins == old(self.total_coins) - redeemed_coins; + ensures self.total_shares == old(self.total_shares) - shares_to_redeem; + include shares_to_redeem > 0 ==> DeductSharesEnsures { + num_shares: shares_to_redeem + }; ensures result == redeemed_coins; } spec transfer_shares( - pool: &mut Pool, + self: &mut Pool, shareholder_1: address, shareholder_2: address, shares_to_transfer: u64 ) { pragma aborts_if_is_partial; - aborts_if !spec_contains(pool, shareholder_1); - aborts_if spec_shares(pool, shareholder_1) < shares_to_transfer; + aborts_if !spec_contains(self, shareholder_1); + aborts_if spec_shares(self, shareholder_1) < shares_to_transfer; // TODO: difficult to specify due to the intermediate state problem. 
} - spec deduct_shares(pool: &mut Pool, shareholder: address, num_shares: u64): u64 { - aborts_if !spec_contains(pool, shareholder); - aborts_if spec_shares(pool, shareholder) < num_shares; + spec deduct_shares(self: &mut Pool, shareholder: address, num_shares: u64): u64 { + aborts_if !spec_contains(self, shareholder); + aborts_if spec_shares(self, shareholder) < num_shares; include DeductSharesEnsures; - let remaining_shares = simple_map::spec_get(pool.shares, shareholder) - num_shares; - ensures remaining_shares > 0 ==> result == simple_map::spec_get(pool.shares, shareholder); + let remaining_shares = simple_map::spec_get(self.shares, shareholder) - num_shares; + ensures remaining_shares > 0 ==> result == simple_map::spec_get(self.shares, shareholder); ensures remaining_shares == 0 ==> result == 0; } spec schema DeductSharesEnsures { - pool: Pool; + self: Pool; shareholder: address; num_shares: u64; - let remaining_shares = simple_map::spec_get(pool.shares, shareholder) - num_shares; - ensures remaining_shares > 0 ==> simple_map::spec_get(pool.shares, shareholder) == remaining_shares; - ensures remaining_shares == 0 ==> !simple_map::spec_contains_key(pool.shares, shareholder); - ensures remaining_shares == 0 ==> !vector::spec_contains(pool.shareholders, shareholder); + let remaining_shares = simple_map::spec_get(self.shares, shareholder) - num_shares; + ensures remaining_shares > 0 ==> simple_map::spec_get(self.shares, shareholder) == remaining_shares; + ensures remaining_shares == 0 ==> !simple_map::spec_contains_key(self.shares, shareholder); + ensures remaining_shares == 0 ==> !vector::spec_contains(self.shareholders, shareholder); } } diff --git a/aptos-move/framework/aptos-stdlib/sources/pool_u64_unbound.move b/aptos-move/framework/aptos-stdlib/sources/pool_u64_unbound.move index c9ab78e3b52a8..4b0e85c98d20f 100644 --- a/aptos-move/framework/aptos-stdlib/sources/pool_u64_unbound.move +++ b/aptos-move/framework/aptos-stdlib/sources/pool_u64_unbound.move @@ -16,6 +16,7 @@ module aptos_std::pool_u64_unbound { use aptos_std::table_with_length::{Self as table, TableWithLength as Table}; use std::error; + use aptos_std::math128; /// Shareholder not present in pool. const ESHAREHOLDER_NOT_FOUND: u64 = 1; @@ -69,202 +70,193 @@ module aptos_std::pool_u64_unbound { } /// Destroy an empty pool. This will fail if the pool has any balance of coins. - public fun destroy_empty(pool: Pool) { - assert!(pool.total_coins == 0, error::invalid_state(EPOOL_IS_NOT_EMPTY)); + public fun destroy_empty(self: Pool) { + assert!(self.total_coins == 0, error::invalid_state(EPOOL_IS_NOT_EMPTY)); let Pool { total_coins: _, total_shares: _, shares, scaling_factor: _, - } = pool; - table::destroy_empty(shares); + } = self; + shares.destroy_empty::(); } - /// Return `pool`'s total balance of coins. - public fun total_coins(pool: &Pool): u64 { - pool.total_coins + /// Return `self`'s total balance of coins. + public fun total_coins(self: &Pool): u64 { + self.total_coins } - /// Return the total number of shares across all shareholders in `pool`. - public fun total_shares(pool: &Pool): u128 { - pool.total_shares + /// Return the total number of shares across all shareholders in `self`. + public fun total_shares(self: &Pool): u128 { + self.total_shares } - /// Return true if `shareholder` is in `pool`. - public fun contains(pool: &Pool, shareholder: address): bool { - table::contains(&pool.shares, shareholder) + /// Return true if `shareholder` is in `self`. 
+    public fun contains(self: &Pool, shareholder: address): bool {
+        self.shares.contains(shareholder)
     }

-    /// Return the number of shares of `stakeholder` in `pool`.
-    public fun shares(pool: &Pool, shareholder: address): u128 {
-        if (contains(pool, shareholder)) {
-            *table::borrow(&pool.shares, shareholder)
+    /// Return the number of shares of `shareholder` in `self`.
+    public fun shares(self: &Pool, shareholder: address): u128 {
+        if (self.contains(shareholder)) {
+            *self.shares.borrow(shareholder)
         } else {
             0
         }
     }

-    /// Return the balance in coins of `shareholder` in `pool.`
-    public fun balance(pool: &Pool, shareholder: address): u64 {
-        let num_shares = shares(pool, shareholder);
-        shares_to_amount(pool, num_shares)
+    /// Return the balance in coins of `shareholder` in `self`.
+    public fun balance(self: &Pool, shareholder: address): u64 {
+        let num_shares = self.shares(shareholder);
+        self.shares_to_amount(num_shares)
     }

-    /// Return the number of shareholders in `pool`.
-    public fun shareholders_count(pool: &Pool): u64 {
-        table::length(&pool.shares)
+    /// Return the number of shareholders in `self`.
+    public fun shareholders_count(self: &Pool): u64 {
+        self.shares.length()
     }

-    /// Update `pool`'s total balance of coins.
-    public fun update_total_coins(pool: &mut Pool, new_total_coins: u64) {
-        pool.total_coins = new_total_coins;
+    /// Update `self`'s total balance of coins.
+    public fun update_total_coins(self: &mut Pool, new_total_coins: u64) {
+        self.total_coins = new_total_coins;
     }

     /// Allow an existing or new shareholder to add their coins to the pool in exchange for new shares.
-    public fun buy_in(pool: &mut Pool, shareholder: address, coins_amount: u64): u128 {
+    public fun buy_in(self: &mut Pool, shareholder: address, coins_amount: u64): u128 {
         if (coins_amount == 0) return 0;

-        let new_shares = amount_to_shares(pool, coins_amount);
-        assert!(MAX_U64 - pool.total_coins >= coins_amount, error::invalid_argument(EPOOL_TOTAL_COINS_OVERFLOW));
-        assert!(MAX_U128 - pool.total_shares >= new_shares, error::invalid_argument(EPOOL_TOTAL_SHARES_OVERFLOW));
+        let new_shares = self.amount_to_shares(coins_amount);
+        assert!(MAX_U64 - self.total_coins >= coins_amount, error::invalid_argument(EPOOL_TOTAL_COINS_OVERFLOW));
+        assert!(MAX_U128 - self.total_shares >= new_shares, error::invalid_argument(EPOOL_TOTAL_SHARES_OVERFLOW));

-        pool.total_coins = pool.total_coins + coins_amount;
-        pool.total_shares = pool.total_shares + new_shares;
-        add_shares(pool, shareholder, new_shares);
+        self.total_coins += coins_amount;
+        self.total_shares += new_shares;
+        self.add_shares(shareholder, new_shares);
         new_shares
     }

-    /// Add the number of shares directly for `shareholder` in `pool`.
+    /// Add the number of shares directly for `shareholder` in `self`.
     /// This would dilute other shareholders if the pool's balance of coins didn't change.
- fun add_shares(pool: &mut Pool, shareholder: address, new_shares: u128): u128 { - if (contains(pool, shareholder)) { - let existing_shares = table::borrow_mut(&mut pool.shares, shareholder); + fun add_shares(self: &mut Pool, shareholder: address, new_shares: u128): u128 { + if (self.contains(shareholder)) { + let existing_shares = self.shares.borrow_mut(shareholder); let current_shares = *existing_shares; assert!(MAX_U128 - current_shares >= new_shares, error::invalid_argument(ESHAREHOLDER_SHARES_OVERFLOW)); *existing_shares = current_shares + new_shares; *existing_shares } else if (new_shares > 0) { - table::add(&mut pool.shares, shareholder, new_shares); + self.shares.add(shareholder, new_shares); new_shares } else { new_shares } } - /// Allow `shareholder` to redeem their shares in `pool` for coins. - public fun redeem_shares(pool: &mut Pool, shareholder: address, shares_to_redeem: u128): u64 { - assert!(contains(pool, shareholder), error::invalid_argument(ESHAREHOLDER_NOT_FOUND)); - assert!(shares(pool, shareholder) >= shares_to_redeem, error::invalid_argument(EINSUFFICIENT_SHARES)); + /// Allow `shareholder` to redeem their shares in `self` for coins. + public fun redeem_shares(self: &mut Pool, shareholder: address, shares_to_redeem: u128): u64 { + assert!(self.contains(shareholder), error::invalid_argument(ESHAREHOLDER_NOT_FOUND)); + assert!(self.shares(shareholder) >= shares_to_redeem, error::invalid_argument(EINSUFFICIENT_SHARES)); if (shares_to_redeem == 0) return 0; - let redeemed_coins = shares_to_amount(pool, shares_to_redeem); - pool.total_coins = pool.total_coins - redeemed_coins; - pool.total_shares = pool.total_shares - shares_to_redeem; - deduct_shares(pool, shareholder, shares_to_redeem); + let redeemed_coins = self.shares_to_amount(shares_to_redeem); + self.total_coins -= redeemed_coins; + self.total_shares -= shares_to_redeem; + self.deduct_shares(shareholder, shares_to_redeem); redeemed_coins } /// Transfer shares from `shareholder_1` to `shareholder_2`. public fun transfer_shares( - pool: &mut Pool, + self: &mut Pool, shareholder_1: address, shareholder_2: address, shares_to_transfer: u128, ) { - assert!(contains(pool, shareholder_1), error::invalid_argument(ESHAREHOLDER_NOT_FOUND)); - assert!(shares(pool, shareholder_1) >= shares_to_transfer, error::invalid_argument(EINSUFFICIENT_SHARES)); + assert!(self.contains(shareholder_1), error::invalid_argument(ESHAREHOLDER_NOT_FOUND)); + assert!(self.shares(shareholder_1) >= shares_to_transfer, error::invalid_argument(EINSUFFICIENT_SHARES)); if (shares_to_transfer == 0) return; - deduct_shares(pool, shareholder_1, shares_to_transfer); - add_shares(pool, shareholder_2, shares_to_transfer); + self.deduct_shares(shareholder_1, shares_to_transfer); + self.add_shares(shareholder_2, shares_to_transfer); } - /// Directly deduct `shareholder`'s number of shares in `pool` and return the number of remaining shares. - fun deduct_shares(pool: &mut Pool, shareholder: address, num_shares: u128): u128 { - assert!(contains(pool, shareholder), error::invalid_argument(ESHAREHOLDER_NOT_FOUND)); - assert!(shares(pool, shareholder) >= num_shares, error::invalid_argument(EINSUFFICIENT_SHARES)); + /// Directly deduct `shareholder`'s number of shares in `self` and return the number of remaining shares. 
+    fun deduct_shares(self: &mut Pool, shareholder: address, num_shares: u128): u128 {
+        assert!(self.contains(shareholder), error::invalid_argument(ESHAREHOLDER_NOT_FOUND));
+        assert!(self.shares(shareholder) >= num_shares, error::invalid_argument(EINSUFFICIENT_SHARES));

-        let existing_shares = table::borrow_mut(&mut pool.shares, shareholder);
-        *existing_shares = *existing_shares - num_shares;
+        let existing_shares = self.shares.borrow_mut(shareholder);
+        *existing_shares -= num_shares;

         // Remove the shareholder completely if they have no shares left.
         let remaining_shares = *existing_shares;
         if (remaining_shares == 0) {
-            table::remove(&mut pool.shares, shareholder);
+            self.shares.remove(shareholder);
         };

         remaining_shares
     }

-    /// Return the number of new shares `coins_amount` can buy in `pool`.
+    /// Return the number of new shares `coins_amount` can buy in `self`.
     /// `amount` needs to be big enough to avoid rounding errors.
-    public fun amount_to_shares(pool: &Pool, coins_amount: u64): u128 {
-        amount_to_shares_with_total_coins(pool, coins_amount, pool.total_coins)
+    public fun amount_to_shares(self: &Pool, coins_amount: u64): u128 {
+        self.amount_to_shares_with_total_coins(coins_amount, self.total_coins)
     }

-    /// Return the number of new shares `coins_amount` can buy in `pool` with a custom total coins number.
+    /// Return the number of new shares `coins_amount` can buy in `self` with a custom total coins number.
     /// `amount` needs to be big enough to avoid rounding errors.
-    public fun amount_to_shares_with_total_coins(pool: &Pool, coins_amount: u64, total_coins: u64): u128 {
+    public fun amount_to_shares_with_total_coins(self: &Pool, coins_amount: u64, total_coins: u64): u128 {
         // No shares yet so amount is worth the same number of shares.
-        if (pool.total_coins == 0 || pool.total_shares == 0) {
+        if (self.total_coins == 0 || self.total_shares == 0) {
             // Multiply by scaling factor to minimize rounding errors during internal calculations for buy ins/redeems.
             // This can overflow but scaling factor is expected to be chosen carefully so this would not overflow.
-            to_u128(coins_amount) * to_u128(pool.scaling_factor)
+            (coins_amount as u128) * (self.scaling_factor as u128)
         } else {
             // Shares price = total_coins / total existing shares.
             // New number of shares = new_amount / shares_price = new_amount * existing_shares / total_amount.
             // We rearrange the calc and do multiplication first to avoid rounding errors.
-            multiply_then_divide(pool, to_u128(coins_amount), pool.total_shares, to_u128(total_coins))
+            self.multiply_then_divide(coins_amount as u128, self.total_shares, total_coins as u128)
         }
     }

-    /// Return the number of coins `shares` are worth in `pool`.
+    /// Return the number of coins `shares` are worth in `self`.
     /// `shares` needs to be big enough to avoid rounding errors.
-    public fun shares_to_amount(pool: &Pool, shares: u128): u64 {
-        shares_to_amount_with_total_coins(pool, shares, pool.total_coins)
+    public fun shares_to_amount(self: &Pool, shares: u128): u64 {
+        self.shares_to_amount_with_total_coins(shares, self.total_coins)
     }

-    /// Return the number of coins `shares` are worth in `pool` with a custom total coins number.
+    /// Return the number of coins `shares` are worth in `self` with a custom total coins number.
     /// `shares` needs to be big enough to avoid rounding errors.
- public fun shares_to_amount_with_total_coins(pool: &Pool, shares: u128, total_coins: u64): u64 { + public fun shares_to_amount_with_total_coins(self: &Pool, shares: u128, total_coins: u64): u64 { // No shares or coins yet so shares are worthless. - if (pool.total_coins == 0 || pool.total_shares == 0) { + if (self.total_coins == 0 || self.total_shares == 0) { 0 } else { // Shares price = total_coins / total existing shares. // Shares worth = shares * shares price = shares * total_coins / total existing shares. // We rearrange the calc and do multiplication first to avoid rounding errors. - (multiply_then_divide(pool, shares, to_u128(total_coins), pool.total_shares) as u64) + (self.multiply_then_divide(shares, total_coins as u128, self.total_shares) as u64) } } /// Return the number of coins `shares` are worth in `pool` with custom total coins and shares numbers. public fun shares_to_amount_with_total_stats( - pool: &Pool, + self: &Pool, shares: u128, total_coins: u64, total_shares: u128, ): u64 { - if (pool.total_coins == 0 || total_shares == 0) { + if (self.total_coins == 0 || total_shares == 0) { 0 } else { - (multiply_then_divide(pool, shares, to_u128(total_coins), total_shares) as u64) + (self.multiply_then_divide(shares, total_coins as u128, total_shares) as u64) } } - public fun multiply_then_divide(_pool: &Pool, x: u128, y: u128, z: u128): u128 { - let result = (to_u256(x) * to_u256(y)) / to_u256(z); - (result as u128) - } - - fun to_u128(num: u64): u128 { - (num as u128) - } - - fun to_u256(num: u128): u256 { - (num as u256) + public fun multiply_then_divide(self: &Pool, x: u128, y: u128, z: u128): u128 { + math128::mul_div(x, y, z) } } diff --git a/aptos-move/framework/aptos-stdlib/sources/pool_u64_unbound.spec.move b/aptos-move/framework/aptos-stdlib/sources/pool_u64_unbound.spec.move index 2a8570883ebea..ca25b25e921ce 100644 --- a/aptos-move/framework/aptos-stdlib/sources/pool_u64_unbound.spec.move +++ b/aptos-move/framework/aptos-stdlib/sources/pool_u64_unbound.spec.move @@ -15,9 +15,9 @@ spec aptos_std::pool_u64_unbound { table::spec_contains(pool.shares, shareholder) } - spec contains(pool: &Pool, shareholder: address): bool { + spec contains(self: &Pool, shareholder: address): bool { aborts_if false; - ensures result == spec_contains(pool, shareholder); + ensures result == spec_contains(self, shareholder); } spec fun spec_shares(pool: Pool, shareholder: address): u64 { @@ -29,59 +29,59 @@ spec aptos_std::pool_u64_unbound { } } - spec shares(pool: &Pool, shareholder: address): u128 { + spec shares(self: &Pool, shareholder: address): u128 { aborts_if false; - ensures result == spec_shares(pool, shareholder); + ensures result == spec_shares(self, shareholder); } - spec balance(pool: &Pool, shareholder: address): u64 { - let shares = spec_shares(pool, shareholder); - let total_coins = pool.total_coins; - aborts_if pool.total_coins > 0 && pool.total_shares > 0 && (shares * total_coins) / pool.total_shares > MAX_U64; - ensures result == spec_shares_to_amount_with_total_coins(pool, shares, total_coins); + spec balance(self: &Pool, shareholder: address): u64 { + let shares = spec_shares(self, shareholder); + let total_coins = self.total_coins; + aborts_if self.total_coins > 0 && self.total_shares > 0 && (shares * total_coins) / self.total_shares > MAX_U64; + ensures result == spec_shares_to_amount_with_total_coins(self, shares, total_coins); } - spec buy_in(pool: &mut Pool, shareholder: address, coins_amount: u64): u128 { - let new_shares = 
spec_amount_to_shares_with_total_coins(pool, coins_amount, pool.total_coins); - aborts_if pool.total_coins + coins_amount > MAX_U64; - aborts_if pool.total_shares + new_shares > MAX_U128; - include coins_amount > 0 ==> AddSharesAbortsIf { new_shares: new_shares }; - include coins_amount > 0 ==> AddSharesEnsures { new_shares: new_shares }; - ensures pool.total_coins == old(pool.total_coins) + coins_amount; - ensures pool.total_shares == old(pool.total_shares) + new_shares; + spec buy_in(self: &mut Pool, shareholder: address, coins_amount: u64): u128 { + let new_shares = spec_amount_to_shares_with_total_coins(self, coins_amount, self.total_coins); + aborts_if self.total_coins + coins_amount > MAX_U64; + aborts_if self.total_shares + new_shares > MAX_U128; + include coins_amount > 0 ==> AddSharesAbortsIf { new_shares }; + include coins_amount > 0 ==> AddSharesEnsures { new_shares }; + ensures self.total_coins == old(self.total_coins) + coins_amount; + ensures self.total_shares == old(self.total_shares) + new_shares; ensures result == new_shares; } - spec add_shares(pool: &mut Pool, shareholder: address, new_shares: u128): u128 { + spec add_shares(self: &mut Pool, shareholder: address, new_shares: u128): u128 { include AddSharesAbortsIf; include AddSharesEnsures; - let key_exists = table::spec_contains(pool.shares, shareholder); - ensures result == if (key_exists) { table::spec_get(pool.shares, shareholder) } + let key_exists = table::spec_contains(self.shares, shareholder); + ensures result == if (key_exists) { table::spec_get(self.shares, shareholder) } else { new_shares }; } spec schema AddSharesAbortsIf { - pool: Pool; + self: Pool; shareholder: address; new_shares: u64; - let key_exists = table::spec_contains(pool.shares, shareholder); - let current_shares = table::spec_get(pool.shares, shareholder); + let key_exists = table::spec_contains(self.shares, shareholder); + let current_shares = table::spec_get(self.shares, shareholder); aborts_if key_exists && current_shares + new_shares > MAX_U128; } spec schema AddSharesEnsures { - pool: Pool; + self: Pool; shareholder: address; new_shares: u64; - let key_exists = table::spec_contains(pool.shares, shareholder); - let current_shares = table::spec_get(pool.shares, shareholder); + let key_exists = table::spec_contains(self.shares, shareholder); + let current_shares = table::spec_get(self.shares, shareholder); ensures key_exists ==> - pool.shares == table::spec_set(old(pool.shares), shareholder, current_shares + new_shares); + self.shares == table::spec_set(old(self.shares), shareholder, current_shares + new_shares); ensures (!key_exists && new_shares > 0) ==> - pool.shares == table::spec_set(old(pool.shares), shareholder, new_shares); + self.shares == table::spec_set(old(self.shares), shareholder, new_shares); } spec fun spec_amount_to_shares_with_total_coins(pool: Pool, coins_amount: u64, total_coins: u64): u128 { @@ -93,19 +93,19 @@ spec aptos_std::pool_u64_unbound { } } - spec amount_to_shares_with_total_coins(pool: &Pool, coins_amount: u64, total_coins: u64): u128 { - aborts_if pool.total_coins > 0 && pool.total_shares > 0 - && (coins_amount * pool.total_shares) / total_coins > MAX_U128; - aborts_if (pool.total_coins == 0 || pool.total_shares == 0) - && coins_amount * pool.scaling_factor > MAX_U128; - aborts_if pool.total_coins > 0 && pool.total_shares > 0 && total_coins == 0; - ensures result == spec_amount_to_shares_with_total_coins(pool, coins_amount, total_coins); + spec amount_to_shares_with_total_coins(self: &Pool, coins_amount: u64, 
total_coins: u64): u128 { + aborts_if self.total_coins > 0 && self.total_shares > 0 + && (coins_amount * self.total_shares) / total_coins > MAX_U128; + aborts_if (self.total_coins == 0 || self.total_shares == 0) + && coins_amount * self.scaling_factor > MAX_U128; + aborts_if self.total_coins > 0 && self.total_shares > 0 && total_coins == 0; + ensures result == spec_amount_to_shares_with_total_coins(self, coins_amount, total_coins); } - spec shares_to_amount_with_total_coins(pool: &Pool, shares: u128, total_coins: u64): u64 { - aborts_if pool.total_coins > 0 && pool.total_shares > 0 - && (shares * total_coins) / pool.total_shares > MAX_U64; - ensures result == spec_shares_to_amount_with_total_coins(pool, shares, total_coins); + spec shares_to_amount_with_total_coins(self: &Pool, shares: u128, total_coins: u64): u64 { + aborts_if self.total_coins > 0 && self.total_shares > 0 + && (shares * total_coins) / self.total_shares > MAX_U64; + ensures result == spec_shares_to_amount_with_total_coins(self, shares, total_coins); } spec fun spec_shares_to_amount_with_total_coins(pool: Pool, shares: u128, total_coins: u64): u64 { @@ -117,72 +117,65 @@ spec aptos_std::pool_u64_unbound { } } - spec multiply_then_divide(_pool: &Pool, x: u128, y: u128, z: u128): u128 { + spec multiply_then_divide(self: &Pool, x: u128, y: u128, z: u128): u128 { aborts_if z == 0; aborts_if (x * y) / z > MAX_U128; ensures result == (x * y) / z; } - spec redeem_shares(pool: &mut Pool, shareholder: address, shares_to_redeem: u128): u64 { - let redeemed_coins = spec_shares_to_amount_with_total_coins(pool, shares_to_redeem, pool.total_coins); - aborts_if !spec_contains(pool, shareholder); - aborts_if spec_shares(pool, shareholder) < shares_to_redeem; - aborts_if pool.total_coins < redeemed_coins; - aborts_if pool.total_shares < shares_to_redeem; - ensures pool.total_coins == old(pool.total_coins) - redeemed_coins; - ensures pool.total_shares == old(pool.total_shares) - shares_to_redeem; - include shares_to_redeem > 0 ==> DeductSharesEnsures { num_shares: shares_to_redeem }; + spec redeem_shares(self: &mut Pool, shareholder: address, shares_to_redeem: u128): u64 { + let redeemed_coins = spec_shares_to_amount_with_total_coins(self, shares_to_redeem, self.total_coins); + aborts_if !spec_contains(self, shareholder); + aborts_if spec_shares(self, shareholder) < shares_to_redeem; + aborts_if self.total_coins < redeemed_coins; + aborts_if self.total_shares < shares_to_redeem; + ensures self.total_coins == old(self.total_coins) - redeemed_coins; + ensures self.total_shares == old(self.total_shares) - shares_to_redeem; + include shares_to_redeem > 0 ==> DeductSharesEnsures { + num_shares: shares_to_redeem + }; ensures result == redeemed_coins; } spec transfer_shares( - pool: &mut Pool, + self: &mut Pool, shareholder_1: address, shareholder_2: address, shares_to_transfer: u128 ) { - aborts_if (shareholder_1 != shareholder_2) && shares_to_transfer > 0 && spec_contains(pool, shareholder_2) && - (spec_shares(pool, shareholder_2) + shares_to_transfer > MAX_U128); - aborts_if !spec_contains(pool, shareholder_1); - aborts_if spec_shares(pool, shareholder_1) < shares_to_transfer; - ensures shareholder_1 == shareholder_2 ==> spec_shares(old(pool), shareholder_1) == spec_shares(pool, shareholder_1); - ensures ((shareholder_1 != shareholder_2) && (spec_shares(old(pool), shareholder_1) == shares_to_transfer)) ==> - !spec_contains(pool, shareholder_1); + aborts_if (shareholder_1 != shareholder_2) && shares_to_transfer > 0 && spec_contains(self, 
shareholder_2) && + (spec_shares(self, shareholder_2) + shares_to_transfer > MAX_U128); + aborts_if !spec_contains(self, shareholder_1); + aborts_if spec_shares(self, shareholder_1) < shares_to_transfer; + ensures shareholder_1 == shareholder_2 ==> spec_shares(old(self), shareholder_1) == spec_shares( + self, shareholder_1); + ensures ((shareholder_1 != shareholder_2) && (spec_shares(old(self), shareholder_1) == shares_to_transfer)) ==> + !spec_contains(self, shareholder_1); ensures (shareholder_1 != shareholder_2 && shares_to_transfer > 0) ==> - (spec_contains(pool, shareholder_2)); - ensures (shareholder_1 != shareholder_2 && shares_to_transfer > 0 && !spec_contains(old(pool), shareholder_2)) ==> - (spec_contains(pool, shareholder_2) && spec_shares(pool, shareholder_2) == shares_to_transfer); - ensures (shareholder_1 != shareholder_2 && shares_to_transfer > 0 && spec_contains(old(pool), shareholder_2)) ==> - (spec_contains(pool, shareholder_2) && spec_shares(pool, shareholder_2) == spec_shares(old(pool), shareholder_2) + shares_to_transfer); - ensures ((shareholder_1 != shareholder_2) && (spec_shares(old(pool), shareholder_1) > shares_to_transfer)) ==> - (spec_contains(pool, shareholder_1) && (spec_shares(pool, shareholder_1) == spec_shares(old(pool), shareholder_1) - shares_to_transfer)); + (spec_contains(self, shareholder_2)); + ensures (shareholder_1 != shareholder_2 && shares_to_transfer > 0 && !spec_contains(old(self), shareholder_2)) ==> + (spec_contains(self, shareholder_2) && spec_shares(self, shareholder_2) == shares_to_transfer); + ensures (shareholder_1 != shareholder_2 && shares_to_transfer > 0 && spec_contains(old(self), shareholder_2)) ==> + (spec_contains(self, shareholder_2) && spec_shares(self, shareholder_2) == spec_shares(old(self), shareholder_2) + shares_to_transfer); + ensures ((shareholder_1 != shareholder_2) && (spec_shares(old(self), shareholder_1) > shares_to_transfer)) ==> + (spec_contains(self, shareholder_1) && (spec_shares(self, shareholder_1) == spec_shares(old(self), shareholder_1) - shares_to_transfer)); } - spec deduct_shares(pool: &mut Pool, shareholder: address, num_shares: u128): u128 { - aborts_if !spec_contains(pool, shareholder); - aborts_if spec_shares(pool, shareholder) < num_shares; + spec deduct_shares(self: &mut Pool, shareholder: address, num_shares: u128): u128 { + aborts_if !spec_contains(self, shareholder); + aborts_if spec_shares(self, shareholder) < num_shares; include DeductSharesEnsures; - let remaining_shares = table::spec_get(pool.shares, shareholder) - num_shares; - ensures remaining_shares > 0 ==> result == table::spec_get(pool.shares, shareholder); + let remaining_shares = table::spec_get(self.shares, shareholder) - num_shares; + ensures remaining_shares > 0 ==> result == table::spec_get(self.shares, shareholder); ensures remaining_shares == 0 ==> result == 0; } spec schema DeductSharesEnsures { - pool: Pool; + self: Pool; shareholder: address; num_shares: u64; - let remaining_shares = table::spec_get(pool.shares, shareholder) - num_shares; - ensures remaining_shares > 0 ==> table::spec_get(pool.shares, shareholder) == remaining_shares; - ensures remaining_shares == 0 ==> !table::spec_contains(pool.shares, shareholder); - } - - spec to_u128(num: u64): u128 { - aborts_if false; - ensures result == num; - } - - spec to_u256(num: u128): u256 { - aborts_if false; - ensures result == num; + let remaining_shares = table::spec_get(self.shares, shareholder) - num_shares; + ensures remaining_shares > 0 ==> table::spec_get(self.shares, 
shareholder) == remaining_shares;
+        ensures remaining_shares == 0 ==> !table::spec_contains(self.shares, shareholder);
     }
 }
diff --git a/aptos-move/framework/aptos-stdlib/sources/simple_map.move b/aptos-move/framework/aptos-stdlib/sources/simple_map.move
index 98ae46cf6b30d..fa2e8819c79a9 100644
--- a/aptos-move/framework/aptos-stdlib/sources/simple_map.move
+++ b/aptos-move/framework/aptos-stdlib/sources/simple_map.move
@@ -4,6 +4,9 @@
 /// 3) A Key can be found within O(N) time
 /// 4) The keys are unsorted.
 /// 5) Adds and removals take O(N) time
+///
+/// DEPRECATED: since its implementation is inefficient, it
+/// has been deprecated in favor of `ordered_map.move`.
 module aptos_std::simple_map {
     use std::error;
     use std::option;
@@ -14,6 +17,8 @@ module aptos_std::simple_map {
     /// Map key is not found
     const EKEY_NOT_FOUND: u64 = 2;

+    /// DEPRECATED: since its implementation is inefficient, it
+    /// has been deprecated in favor of `ordered_map.move`.
     struct SimpleMap has copy, drop, store {
         data: vector>,
     }
@@ -23,8 +28,8 @@ module aptos_std::simple_map {
         value: Value,
     }

-    public fun length(map: &SimpleMap): u64 {
-        vector::length(&map.data)
+    public fun length(self: &SimpleMap): u64 {
+        self.data.length()
     }

     /// Create an empty SimpleMap.
@@ -40,7 +45,7 @@ module aptos_std::simple_map {
         values: vector,
     ): SimpleMap {
         let map = new();
-        add_all(&mut map, keys, values);
+        map.add_all(keys, values);
         map
     }

@@ -52,96 +57,92 @@ module aptos_std::simple_map {
     }

     public fun borrow(
-        map: &SimpleMap,
+        self: &SimpleMap,
         key: &Key,
     ): &Value {
-        let maybe_idx = find(map, key);
-        assert!(option::is_some(&maybe_idx), error::invalid_argument(EKEY_NOT_FOUND));
-        let idx = option::extract(&mut maybe_idx);
-        &vector::borrow(&map.data, idx).value
+        let maybe_idx = self.find(key);
+        assert!(maybe_idx.is_some(), error::invalid_argument(EKEY_NOT_FOUND));
+        let idx = maybe_idx.extract();
+        &self.data.borrow(idx).value
     }

     public fun borrow_mut(
-        map: &mut SimpleMap,
+        self: &mut SimpleMap,
         key: &Key,
     ): &mut Value {
-        let maybe_idx = find(map, key);
-        assert!(option::is_some(&maybe_idx), error::invalid_argument(EKEY_NOT_FOUND));
-        let idx = option::extract(&mut maybe_idx);
-        &mut vector::borrow_mut(&mut map.data, idx).value
+        let maybe_idx = self.find(key);
+        assert!(maybe_idx.is_some(), error::invalid_argument(EKEY_NOT_FOUND));
+        let idx = maybe_idx.extract();
+        &mut self.data.borrow_mut(idx).value
     }

     public fun contains_key(
-        map: &SimpleMap,
+        self: &SimpleMap,
         key: &Key,
     ): bool {
-        let maybe_idx = find(map, key);
-        option::is_some(&maybe_idx)
+        let maybe_idx = self.find(key);
+        maybe_idx.is_some()
     }

-    public fun destroy_empty(map: SimpleMap) {
-        let SimpleMap { data } = map;
-        vector::destroy_empty(data);
+    public fun destroy_empty(self: SimpleMap) {
+        let SimpleMap { data } = self;
+        data.destroy_empty();
     }

     /// Add a key/value pair to the map. The key must not already exist.
     public fun add(
-        map: &mut SimpleMap,
+        self: &mut SimpleMap,
         key: Key,
         value: Value,
     ) {
-        let maybe_idx = find(map, &key);
-        assert!(option::is_none(&maybe_idx), error::invalid_argument(EKEY_ALREADY_EXISTS));
+        let maybe_idx = self.find(&key);
+        assert!(maybe_idx.is_none(), error::invalid_argument(EKEY_ALREADY_EXISTS));

-        vector::push_back(&mut map.data, Element { key, value });
+        self.data.push_back(Element { key, value });
     }

     /// Add multiple key/value pairs to the map. The keys must not already exist.
public fun add_all( - map: &mut SimpleMap, + self: &mut SimpleMap, keys: vector, values: vector, ) { - vector::zip(keys, values, |key, value| { - add(map, key, value); + keys.zip(values, |key, value| { + self.add(key, value); }); } /// Insert key/value pair or update an existing key to a new value public fun upsert( - map: &mut SimpleMap, + self: &mut SimpleMap, key: Key, value: Value ): (std::option::Option, std::option::Option) { - let data = &mut map.data; - let len = vector::length(data); - let i = 0; - while (i < len) { - let element = vector::borrow(data, i); + let data = &mut self.data; + let len = data.length(); + for (i in 0..len) { + let element = data.borrow(i); if (&element.key == &key) { - vector::push_back(data, Element { key, value }); - vector::swap(data, i, len); - let Element { key, value } = vector::pop_back(data); + data.push_back(Element { key, value }); + data.swap(i, len); + let Element { key, value } = data.pop_back(); return (std::option::some(key), std::option::some(value)) }; - i = i + 1; }; - vector::push_back(&mut map.data, Element { key, value }); + self.data.push_back(Element { key, value }); (std::option::none(), std::option::none()) } /// Return all keys in the map. This requires keys to be copyable. - public fun keys(map: &SimpleMap): vector { - vector::map_ref(&map.data, |e| { - let e: &Element = e; + public fun keys(self: &SimpleMap): vector { + self.data.map_ref(|e| { e.key }) } /// Return all values in the map. This requires values to be copyable. - public fun values(map: &SimpleMap): vector { - vector::map_ref(&map.data, |e| { - let e: &Element = e; + public fun values(self: &SimpleMap): vector { + self.data.map_ref(|e| { e.value }) } @@ -149,14 +150,14 @@ module aptos_std::simple_map { /// Transform the map into two vectors with the keys and values respectively /// Primarily used to destroy a map public fun to_vec_pair( - map: SimpleMap): (vector, vector) { + self: SimpleMap): (vector, vector) { let keys: vector = vector::empty(); let values: vector = vector::empty(); - let SimpleMap { data } = map; - vector::for_each(data, |e| { + let SimpleMap { data } = self; + data.for_each(|e| { let Element { key, value } = e; - vector::push_back(&mut keys, key); - vector::push_back(&mut values, value); + keys.push_back(key); + values.push_back(value); }); (keys, values) } @@ -164,39 +165,37 @@ module aptos_std::simple_map { /// For maps that cannot be dropped this is a utility to destroy them /// using lambdas to destroy the individual keys and values. public inline fun destroy( - map: SimpleMap, + self: SimpleMap, dk: |Key|, dv: |Value| ) { - let (keys, values) = to_vec_pair(map); - vector::destroy(keys, |_k| dk(_k)); - vector::destroy(values, |_v| dv(_v)); + let (keys, values) = self.to_vec_pair(); + keys.destroy(|_k| dk(_k)); + values.destroy(|_v| dv(_v)); } /// Remove a key/value pair from the map. The key must exist. 
public fun remove( - map: &mut SimpleMap, + self: &mut SimpleMap, key: &Key, ): (Key, Value) { - let maybe_idx = find(map, key); - assert!(option::is_some(&maybe_idx), error::invalid_argument(EKEY_NOT_FOUND)); - let placement = option::extract(&mut maybe_idx); - let Element { key, value } = vector::swap_remove(&mut map.data, placement); + let maybe_idx = self.find(key); + assert!(maybe_idx.is_some(), error::invalid_argument(EKEY_NOT_FOUND)); + let placement = maybe_idx.extract(); + let Element { key, value } = self.data.swap_remove(placement); (key, value) } fun find( - map: &SimpleMap, + self: &SimpleMap, key: &Key, ): option::Option { - let leng = vector::length(&map.data); - let i = 0; - while (i < leng) { - let element = vector::borrow(&map.data, i); + let len = self.data.length(); + for (i in 0..len) { + let element = self.data.borrow(i); if (&element.key == key) { return option::some(i) }; - i = i + 1; }; option::none() } @@ -205,115 +204,115 @@ module aptos_std::simple_map { public fun test_add_remove_many() { let map = create(); - assert!(length(&map) == 0, 0); - assert!(!contains_key(&map, &3), 1); - add(&mut map, 3, 1); - assert!(length(&map) == 1, 2); - assert!(contains_key(&map, &3), 3); - assert!(borrow(&map, &3) == &1, 4); - *borrow_mut(&mut map, &3) = 2; - assert!(borrow(&map, &3) == &2, 5); - - assert!(!contains_key(&map, &2), 6); - add(&mut map, 2, 5); - assert!(length(&map) == 2, 7); - assert!(contains_key(&map, &2), 8); - assert!(borrow(&map, &2) == &5, 9); - *borrow_mut(&mut map, &2) = 9; - assert!(borrow(&map, &2) == &9, 10); - - remove(&mut map, &2); - assert!(length(&map) == 1, 11); - assert!(!contains_key(&map, &2), 12); - assert!(borrow(&map, &3) == &2, 13); - - remove(&mut map, &3); - assert!(length(&map) == 0, 14); - assert!(!contains_key(&map, &3), 15); - - destroy_empty(map); + assert!(map.length() == 0, 0); + assert!(!map.contains_key(&3), 1); + map.add(3, 1); + assert!(map.length() == 1, 2); + assert!(map.contains_key(&3), 3); + assert!(map.borrow(&3) == &1, 4); + *map.borrow_mut(&3) = 2; + assert!(map.borrow(&3) == &2, 5); + + assert!(!map.contains_key(&2), 6); + map.add(2, 5); + assert!(map.length() == 2, 7); + assert!(map.contains_key(&2), 8); + assert!(map.borrow(&2) == &5, 9); + *map.borrow_mut(&2) = 9; + assert!(map.borrow(&2) == &9, 10); + + map.remove(&2); + assert!(map.length() == 1, 11); + assert!(!map.contains_key(&2), 12); + assert!(map.borrow(&3) == &2, 13); + + map.remove(&3); + assert!(map.length() == 0, 14); + assert!(!map.contains_key(&3), 15); + + map.destroy_empty(); } #[test] public fun test_add_all() { let map = create(); - assert!(length(&map) == 0, 0); - add_all(&mut map, vector[1, 2, 3], vector[10, 20, 30]); - assert!(length(&map) == 3, 1); - assert!(borrow(&map, &1) == &10, 2); - assert!(borrow(&map, &2) == &20, 3); - assert!(borrow(&map, &3) == &30, 4); - - remove(&mut map, &1); - remove(&mut map, &2); - remove(&mut map, &3); - destroy_empty(map); + assert!(map.length() == 0, 0); + map.add_all(vector[1, 2, 3], vector[10, 20, 30]); + assert!(map.length() == 3, 1); + assert!(map.borrow(&1) == &10, 2); + assert!(map.borrow(&2) == &20, 3); + assert!(map.borrow(&3) == &30, 4); + + map.remove(&1); + map.remove(&2); + map.remove(&3); + map.destroy_empty(); } #[test] public fun test_keys() { let map = create(); - assert!(keys(&map) == vector[], 0); - add(&mut map, 2, 1); - add(&mut map, 3, 1); + assert!(map.keys() == vector[], 0); + map.add(2, 1); + map.add(3, 1); - assert!(keys(&map) == vector[2, 3], 0); + assert!(map.keys() == vector[2, 
3], 0); } #[test] public fun test_values() { let map = create(); - assert!(values(&map) == vector[], 0); - add(&mut map, 2, 1); - add(&mut map, 3, 2); + assert!(map.values() == vector[], 0); + map.add(2, 1); + map.add(3, 2); - assert!(values(&map) == vector[1, 2], 0); + assert!(map.values() == vector[1, 2], 0); } #[test] #[expected_failure] public fun test_add_twice() { let map = create(); - add(&mut map, 3, 1); - add(&mut map, 3, 1); + map.add(3, 1); + map.add(3, 1); - remove(&mut map, &3); - destroy_empty(map); + map.remove(&3); + map.destroy_empty(); } #[test] #[expected_failure] public fun test_remove_twice() { let map = create(); - add(&mut map, 3, 1); - remove(&mut map, &3); - remove(&mut map, &3); + map.add(3, 1); + map.remove(&3); + map.remove(&3); - destroy_empty(map); + map.destroy_empty(); } #[test] public fun test_upsert_test() { let map = create(); // test adding 3 elements using upsert - upsert(&mut map, 1, 1); - upsert(&mut map, 2, 2); - upsert(&mut map, 3, 3); - - assert!(length(&map) == 3, 0); - assert!(contains_key(&map, &1), 1); - assert!(contains_key(&map, &2), 2); - assert!(contains_key(&map, &3), 3); - assert!(borrow(&map, &1) == &1, 4); - assert!(borrow(&map, &2) == &2, 5); - assert!(borrow(&map, &3) == &3, 6); + map.upsert::(1, 1); + map.upsert(2, 2); + map.upsert(3, 3); + + assert!(map.length() == 3, 0); + assert!(map.contains_key(&1), 1); + assert!(map.contains_key(&2), 2); + assert!(map.contains_key(&3), 3); + assert!(map.borrow(&1) == &1, 4); + assert!(map.borrow(&2) == &2, 5); + assert!(map.borrow(&3) == &3, 6); // change mapping 1->1 to 1->4 - upsert(&mut map, 1, 4); + map.upsert(1, 4); - assert!(length(&map) == 3, 7); - assert!(contains_key(&map, &1), 8); - assert!(borrow(&map, &1) == &4, 9); + assert!(map.length() == 3, 7); + assert!(map.contains_key(&1), 8); + assert!(map.borrow(&1) == &4, 9); } } diff --git a/aptos-move/framework/aptos-stdlib/sources/simple_map.spec.move b/aptos-move/framework/aptos-stdlib/sources/simple_map.spec.move index 35258eb37532d..f41adf45ae7f3 100644 --- a/aptos-move/framework/aptos-stdlib/sources/simple_map.spec.move +++ b/aptos-move/framework/aptos-stdlib/sources/simple_map.spec.move @@ -86,33 +86,35 @@ spec aptos_std::simple_map { ensures [abstract] spec_len(result) == len(keys); ensures [abstract] forall k: Key: spec_contains_key(result, k) <==> vector::spec_contains(keys, k); ensures [abstract] forall i in 0..len(keys): - spec_get(result, vector::borrow(keys, i)) == vector::borrow(values, i); + spec_get(result, keys.borrow(i)) == values.borrow(i); } - spec to_vec_pair(map: SimpleMap): (vector, vector) { + spec to_vec_pair(self: SimpleMap): (vector, vector) { pragma intrinsic; pragma opaque; ensures [abstract] forall k: Key: vector::spec_contains(result_1, k) <==> - spec_contains_key(map, k); + spec_contains_key(self, k); ensures [abstract] forall i in 0..len(result_1): - spec_get(map, vector::borrow(result_1, i)) == vector::borrow(result_2, i); + spec_get(self, result_1.borrow(i)) == result_2.borrow(i); } spec upsert( - map: &mut SimpleMap, + self: &mut SimpleMap, key: Key, value: Value ): (std::option::Option, std::option::Option) { pragma intrinsic; pragma opaque; aborts_if [abstract] false; - ensures [abstract] !spec_contains_key(old(map), key) ==> option::is_none(result_1); - ensures [abstract] !spec_contains_key(old(map), key) ==> option::is_none(result_2); - ensures [abstract] spec_contains_key(map, key); - ensures [abstract] spec_get(map, key) == value; - ensures [abstract] spec_contains_key(old(map), key) ==> 
((option::is_some(result_1)) && (option::spec_borrow(result_1) == key)); - ensures [abstract] spec_contains_key(old(map), key) ==> ((option::is_some(result_2)) && (option::spec_borrow(result_2) == spec_get(old(map), key))); + ensures [abstract] !spec_contains_key(old(self), key) ==> option::is_none(result_1); + ensures [abstract] !spec_contains_key(old(self), key) ==> option::is_none(result_2); + ensures [abstract] spec_contains_key(self, key); + ensures [abstract] spec_get(self, key) == value; + ensures [abstract] spec_contains_key(old(self), key) ==> ((option::is_some(result_1)) && (option::spec_borrow(result_1) == key)); + ensures [abstract] spec_contains_key(old(self), key) ==> ((option::is_some(result_2)) && (option::spec_borrow(result_2) == spec_get(old( + self + ), key))); } // Specification functions for tables diff --git a/aptos-move/framework/aptos-stdlib/sources/table.move b/aptos-move/framework/aptos-stdlib/sources/table.move index dbc85209dd8e0..c8253b9ea6cb1 100644 --- a/aptos-move/framework/aptos-stdlib/sources/table.move +++ b/aptos-move/framework/aptos-stdlib/sources/table.move @@ -7,6 +7,7 @@ module aptos_std::table { friend aptos_std::table_with_length; + friend aptos_std::storage_slots_allocator; /// Type of tables struct Table has store { @@ -23,73 +24,75 @@ module aptos_std::table { /// Add a new entry to the table. Aborts if an entry for this /// key already exists. The entry itself is not stored in the /// table, and cannot be discovered from it. - public fun add(table: &mut Table, key: K, val: V) { - add_box>(table, key, Box { val }) + public fun add(self: &mut Table, key: K, val: V) { + add_box>(self, key, Box { val }) } /// Acquire an immutable reference to the value which `key` maps to. /// Aborts if there is no entry for `key`. - public fun borrow(table: &Table, key: K): &V { - &borrow_box>(table, key).val + public fun borrow(self: &Table, key: K): &V { + &borrow_box>(self, key).val } /// Acquire an immutable reference to the value which `key` maps to. /// Returns specified default value if there is no entry for `key`. - public fun borrow_with_default(table: &Table, key: K, default: &V): &V { - if (!contains(table, copy key)) { + public fun borrow_with_default(self: &Table, key: K, default: &V): &V { + if (!self.contains(copy key)) { default } else { - borrow(table, copy key) + self.borrow(copy key) } } /// Acquire a mutable reference to the value which `key` maps to. /// Aborts if there is no entry for `key`. - public fun borrow_mut(table: &mut Table, key: K): &mut V { - &mut borrow_box_mut>(table, key).val + public fun borrow_mut(self: &mut Table, key: K): &mut V { + &mut borrow_box_mut>(self, key).val } /// Acquire a mutable reference to the value which `key` maps to. /// Insert the pair (`key`, `default`) first if there is no entry for `key`. - public fun borrow_mut_with_default(table: &mut Table, key: K, default: V): &mut V { - if (!contains(table, copy key)) { - add(table, copy key, default) + public fun borrow_mut_with_default(self: &mut Table, key: K, default: V): &mut V { + if (!self.contains(copy key)) { + self.add(copy key, default) }; - borrow_mut(table, key) + self.borrow_mut(key) } /// Insert the pair (`key`, `value`) if there is no entry for `key`. 
/// update the value of the entry for `key` to `value` otherwise - public fun upsert(table: &mut Table, key: K, value: V) { - if (!contains(table, copy key)) { - add(table, copy key, value) + public fun upsert(self: &mut Table, key: K, value: V) { + if (!self.contains(copy key)) { + self.add(copy key, value) } else { - let ref = borrow_mut(table, key); + let ref = self.borrow_mut(key); *ref = value; }; } - /// Remove from `table` and return the value which `key` maps to. + /// Remove from `self` and return the value which `key` maps to. /// Aborts if there is no entry for `key`. - public fun remove(table: &mut Table, key: K): V { - let Box { val } = remove_box>(table, key); + public fun remove(self: &mut Table, key: K): V { + let Box { val } = remove_box>(self, key); val } - /// Returns true iff `table` contains an entry for `key`. - public fun contains(table: &Table, key: K): bool { - contains_box>(table, key) + /// Returns true iff `self` contains an entry for `key`. + public fun contains(self: &Table, key: K): bool { + contains_box>(self, key) } #[test_only] /// Testing only: allows to drop a table even if it is not empty. - public fun drop_unchecked(table: Table) { - drop_unchecked_box>(table) + public fun drop_unchecked(self: Table) { + drop_unchecked_box>(self) } - public(friend) fun destroy(table: Table) { - destroy_empty_box>(&table); - drop_unchecked_box>(table) + /// Table cannot know if it is empty or not, so this method is not public, + /// and can be used only in modules that know by themselves that table is empty. + friend fun destroy_known_empty_unsafe(self: Table) { + destroy_empty_box>(&self); + drop_unchecked_box>(self) } #[test_only] @@ -102,11 +105,11 @@ module aptos_std::table { let t = new(); let key: u64 = 111; let error_code: u64 = 1; - assert!(!contains(&t, key), error_code); - upsert(&mut t, key, 12); - assert!(*borrow(&t, key) == 12, error_code); - upsert(&mut t, key, 23); - assert!(*borrow(&t, key) == 23, error_code); + assert!(!t.contains(key), error_code); + t.upsert(key, 12); + assert!(*t.borrow(key) == 12, error_code); + t.upsert(key, 23); + assert!(*t.borrow(key) == 23, error_code); move_to(&account, TableHolder { t }); } @@ -116,10 +119,10 @@ module aptos_std::table { let t = new(); let key: u64 = 100; let error_code: u64 = 1; - assert!(!contains(&t, key), error_code); - assert!(*borrow_with_default(&t, key, &12) == 12, error_code); - add(&mut t, key, 1); - assert!(*borrow_with_default(&t, key, &12) == 1, error_code); + assert!(!t.contains(key), error_code); + assert!(*t.borrow_with_default(key, &12) == 12, error_code); + t.add(key, 1); + assert!(*t.borrow_with_default(key, &12) == 1, error_code); move_to(&account, TableHolder{ t }); } diff --git a/aptos-move/framework/aptos-stdlib/sources/table.spec.move b/aptos-move/framework/aptos-stdlib/sources/table.spec.move index 139ae93a641e0..72f47bffc7725 100644 --- a/aptos-move/framework/aptos-stdlib/sources/table.spec.move +++ b/aptos-move/framework/aptos-stdlib/sources/table.spec.move @@ -6,7 +6,7 @@ spec aptos_std::table { spec Table { pragma intrinsic = map, map_new = new, - map_destroy_empty = destroy, + map_destroy_empty = destroy_known_empty_unsafe, map_has_key = contains, map_add_no_override = add, map_add_override_if_exists = upsert, @@ -14,6 +14,7 @@ spec aptos_std::table { map_borrow = borrow, map_borrow_mut = borrow_mut, map_borrow_mut_with_default = borrow_mut_with_default, + map_borrow_with_default = borrow_with_default, map_spec_get = spec_get, map_spec_set = spec_set, map_spec_del = spec_remove, 
@@ -24,7 +25,7 @@ spec aptos_std::table { pragma intrinsic; } - spec destroy { + spec destroy_known_empty_unsafe { pragma intrinsic; } @@ -44,6 +45,10 @@ spec aptos_std::table { pragma intrinsic; } + spec borrow_with_default { + pragma intrinsic; + } + spec upsert { pragma intrinsic; } diff --git a/aptos-move/framework/aptos-stdlib/sources/table_with_length.move b/aptos-move/framework/aptos-stdlib/sources/table_with_length.move index c56ff2b4224fc..098795e39e206 100644 --- a/aptos-move/framework/aptos-stdlib/sources/table_with_length.move +++ b/aptos-move/framework/aptos-stdlib/sources/table_with_length.move @@ -25,117 +25,117 @@ module aptos_std::table_with_length { } /// Destroy a table. The table must be empty to succeed. - public fun destroy_empty(table: TableWithLength) { - assert!(table.length == 0, error::invalid_state(ENOT_EMPTY)); - let TableWithLength { inner, length: _ } = table; - table::destroy(inner) + public fun destroy_empty(self: TableWithLength) { + assert!(self.length == 0, error::invalid_state(ENOT_EMPTY)); + let TableWithLength { inner, length: _ } = self; + inner.destroy_known_empty_unsafe() } /// Add a new entry to the table. Aborts if an entry for this /// key already exists. The entry itself is not stored in the /// table, and cannot be discovered from it. - public fun add(table: &mut TableWithLength, key: K, val: V) { - table::add(&mut table.inner, key, val); - table.length = table.length + 1; + public fun add(self: &mut TableWithLength, key: K, val: V) { + self.inner.add(key, val); + self.length += 1; } /// Acquire an immutable reference to the value which `key` maps to. /// Aborts if there is no entry for `key`. - public fun borrow(table: &TableWithLength, key: K): &V { - table::borrow(&table.inner, key) + public fun borrow(self: &TableWithLength, key: K): &V { + self.inner.borrow(key) } /// Acquire a mutable reference to the value which `key` maps to. /// Aborts if there is no entry for `key`. - public fun borrow_mut(table: &mut TableWithLength, key: K): &mut V { - table::borrow_mut(&mut table.inner, key) + public fun borrow_mut(self: &mut TableWithLength, key: K): &mut V { + self.inner.borrow_mut(key) } /// Returns the length of the table, i.e. the number of entries. - public fun length(table: &TableWithLength): u64 { - table.length + public fun length(self: &TableWithLength): u64 { + self.length } /// Returns true if this table is empty. - public fun empty(table: &TableWithLength): bool { - table.length == 0 + public fun empty(self: &TableWithLength): bool { + self.length == 0 } /// Acquire a mutable reference to the value which `key` maps to. /// Insert the pair (`key`, `default`) first if there is no entry for `key`. - public fun borrow_mut_with_default(table: &mut TableWithLength, key: K, default: V): &mut V { - if (table::contains(&table.inner, key)) { - table::borrow_mut(&mut table.inner, key) + public fun borrow_mut_with_default(self: &mut TableWithLength, key: K, default: V): &mut V { + if (self.inner.contains(key)) { + self.inner.borrow_mut(key) } else { - table::add(&mut table.inner, key, default); - table.length = table.length + 1; - table::borrow_mut(&mut table.inner, key) + self.inner.add(key, default); + self.length += 1; + self.inner.borrow_mut(key) } } /// Insert the pair (`key`, `value`) if there is no entry for `key`. 
/// update the value of the entry for `key` to `value` otherwise - public fun upsert(table: &mut TableWithLength, key: K, value: V) { - if (!table::contains(&table.inner, key)) { - add(table, copy key, value) + public fun upsert(self: &mut TableWithLength, key: K, value: V) { + if (!self.inner.contains(key)) { + self.add(copy key, value) } else { - let ref = table::borrow_mut(&mut table.inner, key); + let ref = self.inner.borrow_mut(key); *ref = value; }; } /// Remove from `table` and return the value which `key` maps to. /// Aborts if there is no entry for `key`. - public fun remove(table: &mut TableWithLength, key: K): V { - let val = table::remove(&mut table.inner, key); - table.length = table.length - 1; + public fun remove(self: &mut TableWithLength, key: K): V { + let val = self.inner.remove(key); + self.length -= 1; val } /// Returns true iff `table` contains an entry for `key`. - public fun contains(table: &TableWithLength, key: K): bool { - table::contains(&table.inner, key) + public fun contains(self: &TableWithLength, key: K): bool { + self.inner.contains(key) } #[test_only] /// Drop table even if not empty, only when testing. - public fun drop_unchecked(table: TableWithLength) { + public fun drop_unchecked(self: TableWithLength) { // Unpack table with length, dropping length count but not // inner table. - let TableWithLength{inner, length: _} = table; - table::drop_unchecked(inner); // Drop inner table. + let TableWithLength{inner, length: _} = self; + inner.drop_unchecked(); // Drop inner table. } #[test] /// Verify test-only drop functionality. fun test_drop_unchecked() { let table = new(); // Declare new table. - add(&mut table, true, false); // Add table entry. - drop_unchecked(table); // Drop table. + table.add(true, false); // Add table entry. + table.drop_unchecked(); // Drop table. 
} #[test] fun test_upsert() { let t = new(); // Table should not have key 0 yet - assert!(!contains(&t, 0), 1); + assert!(!t.contains(0), 1); // This should insert key 0, with value 10, and length should be 1 - upsert(&mut t, 0, 10); + t.upsert(0, 10); // Ensure the value is correctly set to 10 - assert!(*borrow(&t, 0) == 10, 1); + assert!(*t.borrow(0) == 10, 1); // Ensure the length is correctly set - assert!(length(&t) == 1, 1); + assert!(t.length() == 1, 1); // Lets upsert the value to something else, and verify it's correct - upsert(&mut t, 0, 23); - assert!(*borrow(&t, 0) == 23, 1); + t.upsert(0, 23); + assert!(*t.borrow(0) == 23, 1); // Since key 0 already existed, the length should not have changed - assert!(length(&t) == 1, 1); + assert!(t.length() == 1, 1); // If we upsert a non-existing key, the length should increase - upsert(&mut t, 1, 7); - assert!(length(&t) == 2, 1); + t.upsert(1, 7); + assert!(t.length() == 2, 1); - remove(&mut t, 0); - remove(&mut t, 1); - destroy_empty(t); + t.remove(0); + t.remove(1); + t.destroy_empty(); } } diff --git a/aptos-move/framework/aptos-stdlib/sources/type_info.move b/aptos-move/framework/aptos-stdlib/sources/type_info.move index 2ad3bba4041cc..70280c2e08a77 100644 --- a/aptos-move/framework/aptos-stdlib/sources/type_info.move +++ b/aptos-move/framework/aptos-stdlib/sources/type_info.move @@ -1,8 +1,10 @@ module aptos_std::type_info { use std::bcs; use std::features; - use std::string::{Self, String}; - use std::vector; + use std::string::String; + + #[test_only] + use std::string; // // Error codes @@ -24,16 +26,16 @@ module aptos_std::type_info { // Public functions // - public fun account_address(type_info: &TypeInfo): address { - type_info.account_address + public fun account_address(self: &TypeInfo): address { + self.account_address } - public fun module_name(type_info: &TypeInfo): vector { - type_info.module_name + public fun module_name(self: &TypeInfo): vector { + self.module_name } - public fun struct_name(type_info: &TypeInfo): vector { - type_info.struct_name + public fun struct_name(self: &TypeInfo): vector { + self.struct_name } /// Returns the current chain ID, mirroring what `aptos_framework::chain_id::get()` would return, except in `#[test]` @@ -65,27 +67,29 @@ module aptos_std::type_info { /// nesting patterns, as well as `test_size_of_val_vectors()` for an /// analysis of vector size dynamism. public fun size_of_val(val_ref: &T): u64 { - // Return vector length of vectorized BCS representation. 
- vector::length(&bcs::to_bytes(val_ref)) + bcs::serialized_size(val_ref) } #[test_only] use aptos_std::table::Table; + #[test_only] + use std::vector; + #[test] fun test_type_of() { let type_info = type_of(); - assert!(account_address(&type_info) == @aptos_std, 0); - assert!(module_name(&type_info) == b"type_info", 1); - assert!(struct_name(&type_info) == b"TypeInfo", 2); + assert!(type_info.account_address() == @aptos_std, 0); + assert!(type_info.module_name() == b"type_info", 1); + assert!(type_info.struct_name() == b"TypeInfo", 2); } #[test] fun test_type_of_with_type_arg() { let type_info = type_of>(); - assert!(account_address(&type_info) == @aptos_std, 0); - assert!(module_name(&type_info) == b"table", 1); - assert!(struct_name(&type_info) == b"Table<0x1::string::String, 0x1::string::String>", 2); + assert!(type_info.account_address() == @aptos_std, 0); + assert!(type_info.module_name() == b"table", 1); + assert!(type_info.struct_name() == b"Table<0x1::string::String, 0x1::string::String>", 2); } #[test(fx = @std)] @@ -127,9 +131,9 @@ module aptos_std::type_info { #[verify_only] fun verify_type_of() { let type_info = type_of(); - let account_address = account_address(&type_info); - let module_name = module_name(&type_info); - let struct_name = struct_name(&type_info); + let account_address = type_info.account_address(); + let module_name = type_info.module_name(); + let struct_name = type_info.struct_name(); spec { assert account_address == @aptos_std; assert module_name == b"type_info"; @@ -140,9 +144,9 @@ module aptos_std::type_info { #[verify_only] fun verify_type_of_generic() { let type_info = type_of(); - let account_address = account_address(&type_info); - let module_name = module_name(&type_info); - let struct_name = struct_name(&type_info); + let account_address = type_info.account_address(); + let module_name = type_info.module_name(); + let struct_name = type_info.struct_name(); spec { assert account_address == type_of().account_address; assert module_name == type_of().module_name; @@ -223,14 +227,14 @@ module aptos_std::type_info { // Declare a bool in a vector. let bool_vector = vector::singleton(false); // Push back another bool. - vector::push_back(&mut bool_vector, false); + bool_vector.push_back(false); // Assert size is 3 bytes (1 per element, 1 for base vector). assert!(size_of_val(&bool_vector) == 3, 0); // Get a some option, which is implemented as a vector. let u64_option = option::some(0); // Assert size is 9 bytes (8 per element, 1 for base vector). assert!(size_of_val(&u64_option) == 9, 0); - option::extract(&mut u64_option); // Remove the value inside. + u64_option.extract(); // Remove the value inside. // Assert size reduces to 1 byte. assert!(size_of_val(&u64_option) == 1, 0); } @@ -284,26 +288,26 @@ module aptos_std::type_info { let i = 0; // Declare loop counter. while (i < n_elems_cutoff_1) { // Iterate until first cutoff: // Add an element. - vector::push_back(&mut vector_u64, null_element); - i = i + 1; // Increment counter. + vector_u64.push_back(null_element); + i += 1; // Increment counter. }; // Vector base size is still 1 byte. assert!(size_of_val(&vector_u64) - element_size * i == base_size_1, 0); // Add another element, exceeding the cutoff. - vector::push_back(&mut vector_u64, null_element); - i = i + 1; // Increment counter. + vector_u64.push_back(null_element); + i += 1; // Increment counter. // Vector base size is now 2 bytes. 
assert!(size_of_val(&vector_u64) - element_size * i == base_size_2, 0); while (i < n_elems_cutoff_2) { // Iterate until second cutoff: // Add an element. - vector::push_back(&mut vector_u64, null_element); - i = i + 1; // Increment counter. + vector_u64.push_back(null_element); + i += 1; // Increment counter. }; // Vector base size is still 2 bytes. assert!(size_of_val(&vector_u64) - element_size * i == base_size_2, 0); // Add another element, exceeding the cutoff. - vector::push_back(&mut vector_u64, null_element); - i = i + 1; // Increment counter. + vector_u64.push_back(null_element); + i += 1; // Increment counter. // Vector base size is now 3 bytes. assert!(size_of_val(&vector_u64) - element_size * i == base_size_3, 0); // Repeat for custom struct. @@ -323,26 +327,26 @@ module aptos_std::type_info { i = 0; // Re-initialize loop counter. while (i < n_elems_cutoff_1) { // Iterate until first cutoff: // Add an element. - vector::push_back(&mut vector_complex, copy null_element); - i = i + 1; // Increment counter. + vector_complex.push_back(copy null_element); + i += 1; // Increment counter. }; assert!( // Vector base size is still 1 byte. size_of_val(&vector_complex) - element_size * i == base_size_1, 0); // Add another element, exceeding the cutoff. - vector::push_back(&mut vector_complex, null_element); - i = i + 1; // Increment counter. + vector_complex.push_back(null_element); + i += 1; // Increment counter. assert!( // Vector base size is now 2 bytes. size_of_val(&vector_complex) - element_size * i == base_size_2, 0); while (i < n_elems_cutoff_2) { // Iterate until second cutoff: // Add an element. - vector::push_back(&mut vector_complex, copy null_element); - i = i + 1; // Increment counter. + vector_complex.push_back(copy null_element); + i += 1; // Increment counter. }; assert!( // Vector base size is still 2 bytes. size_of_val(&vector_complex) - element_size * i == base_size_2, 0); // Add another element, exceeding the cutoff. - vector::push_back(&mut vector_complex, null_element); - i = i + 1; // Increment counter. + vector_complex.push_back(null_element); + i += 1; // Increment counter. assert!( // Vector base size is now 3 bytes. size_of_val(&vector_complex) - element_size * i == base_size_3, 0); } diff --git a/aptos-move/framework/aptos-stdlib/sources/type_info.spec.move b/aptos-move/framework/aptos-stdlib/sources/type_info.spec.move index ed3ed481c8008..0be8e9be0a840 100644 --- a/aptos-move/framework/aptos-stdlib/sources/type_info.spec.move +++ b/aptos-move/framework/aptos-stdlib/sources/type_info.spec.move @@ -7,7 +7,7 @@ spec aptos_std::type_info { // This function will abort if `T` is not a struct type. } - spec type_name(): string::String { + spec type_name(): String { // Move Prover natively supports this function. 
} @@ -30,7 +30,6 @@ spec aptos_std::type_info { } spec size_of_val(val_ref: &T): u64 { - aborts_if false; ensures result == spec_size_of_val(val_ref); } } diff --git a/aptos-move/framework/aptos-stdlib/tests/cryptography/federated_keyless_tests.move b/aptos-move/framework/aptos-stdlib/tests/cryptography/federated_keyless_tests.move new file mode 100644 index 0000000000000..926c93c6ce195 --- /dev/null +++ b/aptos-move/framework/aptos-stdlib/tests/cryptography/federated_keyless_tests.move @@ -0,0 +1,33 @@ +#[test_only] +module aptos_std::federated_keyless_tests { + use aptos_std::federated_keyless; + use std::string::{utf8}; + use std::bcs; + + #[test] + fun test_deserialize_public_key() { + // The bytes below represent a Federated Keyless public key that looks like + // federated_keyless::PublicKey { + // jwk_address: @0xaa9b5e7acc48169fdc3809b614532a5a675cf7d4c80cd4aea732b47e328bda1a, + // keyless_public_key: keyless::PublicKey { + // iss: "https://accounts.google.com", + // idc: "0x86bc0a0a825eb6337ca1e8a3157e490eac8df23d5cef25d9641ad5e7edc1d514" + // } + // } + // + let bytes = x"aa9b5e7acc48169fdc3809b614532a5a675cf7d4c80cd4aea732b47e328bda1a1b68747470733a2f2f6163636f756e74732e676f6f676c652e636f6d2086bc0a0a825eb6337ca1e8a3157e490eac8df23d5cef25d9641ad5e7edc1d514"; + let pk = federated_keyless::new_public_key_from_bytes(bytes); + assert!( + bcs::to_bytes(&pk) == bytes, + ); + assert!( + pk.get_keyless_public_key().get_iss() == utf8(b"https://accounts.google.com"), + ); + assert!( + pk.get_keyless_public_key().get_idc() == x"86bc0a0a825eb6337ca1e8a3157e490eac8df23d5cef25d9641ad5e7edc1d514", + ); + assert!( + pk.get_jwk_address() == @0xaa9b5e7acc48169fdc3809b614532a5a675cf7d4c80cd4aea732b47e328bda1a, + ); + } +} diff --git a/aptos-move/framework/aptos-stdlib/tests/cryptography/keyless_tests.move b/aptos-move/framework/aptos-stdlib/tests/cryptography/keyless_tests.move new file mode 100644 index 0000000000000..3ffa4ea0f54dd --- /dev/null +++ b/aptos-move/framework/aptos-stdlib/tests/cryptography/keyless_tests.move @@ -0,0 +1,20 @@ +#[test_only] +module aptos_std::keyless_tests { + use aptos_std::keyless; + use std::string::{utf8}; + use std::bcs; + + #[test] + fun test_deserialize_public_key() { + // The bytes below represent a Keyless public key that looks like + // keyless::PublicKey { + // iss: "https://accounts.google.com", + // idc: "0x86bc0a0a825eb6337ca1e8a3157e490eac8df23d5cef25d9641ad5e7edc1d514" + // } + let bytes: vector = x"1b68747470733a2f2f6163636f756e74732e676f6f676c652e636f6d2086bc0a0a825eb6337ca1e8a3157e490eac8df23d5cef25d9641ad5e7edc1d514"; + let pk = keyless::new_public_key_from_bytes(bytes); + assert!(bcs::to_bytes(&pk) == bytes,); + assert!(pk.get_iss() == utf8(b"https://accounts.google.com")); + assert!(pk.get_idc() == x"86bc0a0a825eb6337ca1e8a3157e490eac8df23d5cef25d9641ad5e7edc1d514"); + } +} diff --git a/aptos-move/framework/aptos-stdlib/tests/cryptography/multi_key_tests.move b/aptos-move/framework/aptos-stdlib/tests/cryptography/multi_key_tests.move new file mode 100644 index 0000000000000..3ab74afb85d5e --- /dev/null +++ b/aptos-move/framework/aptos-stdlib/tests/cryptography/multi_key_tests.move @@ -0,0 +1,52 @@ +#[test_only] +module aptos_std::multi_key_tests { + use aptos_std::single_key; + use aptos_std::multi_key; + use std::bcs; + #[test] + fun test_construct_multi_key() { + let pk1 = single_key::new_public_key_from_bytes(x"0020aa9b5e7acc48169fdc3809b614532a5a675cf7d4c80cd4aea732b47e328bda1a"); + let pk2 = 
single_key::new_public_key_from_bytes(x"0020bd182d6e3f4ad1daf0d94e53daaece63ebd571d8a8e0098a02a4c0b4ecc7c99e"); + let multi_key = multi_key::new_multi_key_from_single_keys(vector[pk1, pk2], 1); + let mk_bytes: vector = x"020020aa9b5e7acc48169fdc3809b614532a5a675cf7d4c80cd4aea732b47e328bda1a0020bd182d6e3f4ad1daf0d94e53daaece63ebd571d8a8e0098a02a4c0b4ecc7c99e01"; + assert!(bcs::to_bytes(&multi_key) == mk_bytes); + } + + #[test] + #[expected_failure(abort_code = 0x10003, location = multi_key)] + fun test_construct_multi_key_bad_input_signatures_required_too_large() { + let pk1 = single_key::new_public_key_from_bytes(x"0020aa9b5e7acc48169fdc3809b614532a5a675cf7d4c80cd4aea732b47e328bda1a"); + let pk2 = single_key::new_public_key_from_bytes(x"0020bd182d6e3f4ad1daf0d94e53daaece63ebd571d8a8e0098a02a4c0b4ecc7c99e"); + let _multi_key = multi_key::new_multi_key_from_single_keys(vector[pk1, pk2], 3); + } + + #[test] + #[expected_failure(abort_code = 0x10001, location = multi_key)] + fun test_construct_multi_key_bad_input_no_keys() { + let _multi_key = multi_key::new_multi_key_from_single_keys(vector[], 1); + } + + #[test] + fun test_construct_multi_key_from_bytes() { + let mk_bytes: vector = x"020020aa9b5e7acc48169fdc3809b614532a5a675cf7d4c80cd4aea732b47e328bda1a0020bd182d6e3f4ad1daf0d94e53daaece63ebd571d8a8e0098a02a4c0b4ecc7c99e01"; + let multi_key = multi_key::new_public_key_from_bytes(mk_bytes); + assert!(bcs::to_bytes(&multi_key) == mk_bytes, std::error::invalid_state(1)); + } + + #[test] + #[expected_failure(abort_code = 0x10004, location = multi_key)] + fun test_construct_multi_key_from_bytes_bad_input_extra_bytes() { + let mk_bytes: vector = x"020020aa9b5e7acc48169fdc3809b614532a5a675cf7d4c80cd4aea732b47e328bda1a0020bd182d6e3f4ad1daf0d94e53daaece63ebd571d8a8e0098a02a4c0b4ecc7c99e01"; + mk_bytes.push_back(0x01); + let _multi_key = multi_key::new_public_key_from_bytes(mk_bytes); + } + + #[test] + fun test_get_authentication_key() { + let mk_bytes: vector = x"02031b68747470733a2f2f6163636f756e74732e676f6f676c652e636f6d2086bc0a0a825eb6337ca1e8a3157e490eac8df23d5cef25d9641ad5e7edc1d51400205da515f392de68080051559c9d9898f5feb377f0b0f15d43fd01c98f0a63b0d801"; + let multi_key = multi_key::new_public_key_from_bytes(mk_bytes); + assert!( + multi_key.to_authentication_key() == x"c7ab91daf558b00b1f81207b702349a74029dddfbf0e99d54b3d7675714a61de", + ); + } +} diff --git a/aptos-move/framework/aptos-stdlib/tests/cryptography/secp256r1_tests.move b/aptos-move/framework/aptos-stdlib/tests/cryptography/secp256r1_tests.move new file mode 100644 index 0000000000000..34eaddda31589 --- /dev/null +++ b/aptos-move/framework/aptos-stdlib/tests/cryptography/secp256r1_tests.move @@ -0,0 +1,10 @@ +#[test_only] +module aptos_std::secp256r1_tests { + use aptos_std::secp256r1; + + #[test] + #[expected_failure(abort_code = 0x10001, location = secp256r1)] + fun test_ecdsa_raw_public_key_from_64_bytes_bad_input() { + let _pk = secp256r1::ecdsa_raw_public_key_from_64_bytes(x"11"); + } +} diff --git a/aptos-move/framework/aptos-stdlib/tests/cryptography/single_key_tests.move b/aptos-move/framework/aptos-stdlib/tests/cryptography/single_key_tests.move new file mode 100644 index 0000000000000..6f4f87c093307 --- /dev/null +++ b/aptos-move/framework/aptos-stdlib/tests/cryptography/single_key_tests.move @@ -0,0 +1,22 @@ +#[test_only] +module aptos_std::single_key_tests { + use aptos_std::single_key; + + #[test] + #[expected_failure(abort_code = 0x10002, location = single_key)] + fun test_deserialize_fails_for_extra_bytes() { + let 
pk_bytes: vector = x"031b68747470733a2f2f6163636f756e74732e676f6f676c652e636f6d2086bc0a0a825eb6337ca1e8a3157e490eac8df23d5cef25d9641ad5e7edc1d514"; + pk_bytes.push_back(0x01); + let _any_pk = single_key::new_public_key_from_bytes(pk_bytes); + } + + #[test] + fun test_get_authentication_key() { + let pk_bytes: vector = x"031b68747470733a2f2f6163636f756e74732e676f6f676c652e636f6d2086bc0a0a825eb6337ca1e8a3157e490eac8df23d5cef25d9641ad5e7edc1d514"; + let any_pk = single_key::new_public_key_from_bytes(pk_bytes); + assert!( + any_pk.to_authentication_key() == x"69d542afebf0387b5e4fcb447b79e3fa9b9aaadba4697b51b90b8d7b9649d159", + std::error::invalid_state(1) + ); + } +} diff --git a/aptos-move/framework/aptos-stdlib/tests/fixedpoint64_tests.move b/aptos-move/framework/aptos-stdlib/tests/fixedpoint64_tests.move index 52d1e347cacba..492d0566be26c 100644 --- a/aptos-move/framework/aptos-stdlib/tests/fixedpoint64_tests.move +++ b/aptos-move/framework/aptos-stdlib/tests/fixedpoint64_tests.move @@ -31,7 +31,7 @@ module aptos_std::fixed_point64_tests { #[test] fun create_zero() { let x = fixed_point64::create_from_rational(0, 1); - assert!(fixed_point64::is_zero(x), 0); + assert!(x.is_zero(), 0); } #[test] @@ -97,7 +97,7 @@ module aptos_std::fixed_point64_tests { assert!(not_three == 2, 0); // Try again with a fraction slightly larger than 1/3. - let f = fixed_point64::create_from_raw_value(fixed_point64::get_raw_value(f) + 1); + let f = fixed_point64::create_from_raw_value(f.get_raw_value() + 1); let three = fixed_point64::multiply_u128(9, f); assert!(three == 3, 1); } @@ -106,7 +106,7 @@ module aptos_std::fixed_point64_tests { fun create_from_rational_max_numerator_denominator() { // Test creating a 1.0 fraction from the maximum u64 value. let f = fixed_point64::create_from_rational(MAX_U128, MAX_U128); - let one = fixed_point64::get_raw_value(f); + let one = f.get_raw_value(); assert!(one == POW2_64, 0); // 0x1.00000000 } @@ -115,10 +115,10 @@ module aptos_std::fixed_point64_tests { let one = fixed_point64::create_from_rational(1, 1); let two = fixed_point64::create_from_rational(2, 1); let smaller_number1 = fixed_point64::min(one, two); - let val1 = fixed_point64::get_raw_value(smaller_number1); + let val1 = smaller_number1.get_raw_value(); assert!(val1 == POW2_64, 0); // 0x1.00000000 let smaller_number2 = fixed_point64::min(two, one); - let val2 = fixed_point64::get_raw_value(smaller_number2); + let val2 = smaller_number2.get_raw_value(); assert!(val2 == POW2_64, 0); // 0x1.00000000 } @@ -128,23 +128,23 @@ module aptos_std::fixed_point64_tests { let two = fixed_point64::create_from_rational(2, 1); let larger_number1 = fixed_point64::max(one, two); let larger_number2 = fixed_point64::max(two, one); - let val1 = fixed_point64::get_raw_value(larger_number1); + let val1 = larger_number1.get_raw_value(); assert!(val1 == 2 * POW2_64, 0); // 0x2.00000000 - let val2 = fixed_point64::get_raw_value(larger_number2); + let val2 = larger_number2.get_raw_value(); assert!(val2 == 2 * POW2_64, 0); // 0x2.00000000 } #[test] fun floor_can_return_the_correct_number_zero() { let point_five = fixed_point64::create_from_rational(1, 2); - let val = fixed_point64::floor(point_five); + let val = point_five.floor(); assert!(val == 0, 0); } #[test] fun create_from_u128_create_correct_fixed_point_number() { let one = fixed_point64::create_from_u128(1); - let val = fixed_point64::get_raw_value(one); + let val = one.get_raw_value(); assert!(val == POW2_64, 0); } @@ -157,35 +157,35 @@ module aptos_std::fixed_point64_tests { 
#[test] fun floor_can_return_the_correct_number_one() { let three_point_five = fixed_point64::create_from_rational(7, 2); // 3.5 - let val = fixed_point64::floor(three_point_five); + let val = three_point_five.floor(); assert!(val == 3, 0); } #[test] fun ceil_can_round_up_correctly() { let point_five = fixed_point64::create_from_rational(1, 2); // 0.5 - let val = fixed_point64::ceil(point_five); + let val = point_five.ceil(); assert!(val == 1, 0); } #[test] fun ceil_will_not_change_if_number_already_integer() { let one = fixed_point64::create_from_rational(1, 1); // 0.5 - let val = fixed_point64::ceil(one); + let val = one.ceil(); assert!(val == 1, 0); } #[test] fun round_can_round_up_correctly() { let point_five = fixed_point64::create_from_rational(1, 2); // 0.5 - let val = fixed_point64::round(point_five); + let val = point_five.round(); assert!(val == 1, 0); } #[test] fun round_can_round_down_correctly() { let num = fixed_point64::create_from_rational(499, 1000); // 0.499 - let val = fixed_point64::round(num); + let val = num.round(); assert!(val == 0, 0); } } diff --git a/aptos-move/framework/aptos-token-objects/doc/aptos_token.md b/aptos-move/framework/aptos-token-objects/doc/aptos_token.md index b3f124c43686e..fb779303c3625 100644 --- a/aptos-move/framework/aptos-token-objects/doc/aptos_token.md +++ b/aptos-move/framework/aptos-token-objects/doc/aptos_token.md @@ -482,9 +482,9 @@ Mint a token into an existing collection, and retrieve the object / address of t let freezable_by_creator = are_collection_tokens_freezable(collection); if (freezable_by_creator) { let aptos_token_addr = object::address_from_constructor_ref(&constructor_ref); - let aptos_token = borrow_global_mut<AptosToken>(aptos_token_addr); + let aptos_token = &mut AptosToken[aptos_token_addr]; let transfer_ref = object::generate_transfer_ref(&constructor_ref); - option::fill(&mut aptos_token.transfer_ref, transfer_ref); + aptos_token.transfer_ref.fill(transfer_ref); }; object::object_from_constructor_ref(&constructor_ref) @@ -679,7 +679,7 @@ With an existing collection, directly mint a soul bound token into the recipient exists<AptosToken>(token_address), error::not_found(ETOKEN_DOES_NOT_EXIST), ); - borrow_global<AptosToken>(token_address) + &AptosToken[token_address] }
@@ -730,7 +730,7 @@ With an existing collection, directly mint a soul bound token into the recipient
public fun is_burnable<T: key>(token: Object<T>): bool acquires AptosToken {
-    option::is_some(&borrow(&token).burn_ref)
+    borrow(&token).burn_ref.is_some()
 }
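
The hunks above and below replace module-qualified calls such as `option::is_some(&x)` with receiver-style ("dot") calls. A minimal sketch of the two equivalent forms, using a hypothetical module and an `Option<u64>` value that are not taken from this diff:

```move
module 0x42::dot_call_demo {
    use std::option::Option;

    // Both bodies compile to the same thing; the diff consistently prefers the
    // receiver-style form.
    fun has_value_old(maybe_value: &Option<u64>): bool {
        std::option::is_some(maybe_value)
    }

    fun has_value_new(maybe_value: &Option<u64>): bool {
        maybe_value.is_some()
    }
}
```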
 
@@ -864,7 +864,7 @@ With an existing collection, directly mint a soul bound token into the recipient
         token::creator(*token) == signer::address_of(creator),
         error::permission_denied(ENOT_CREATOR),
     );
-    borrow_global<AptosToken>(token_address)
+    &AptosToken[token_address]
 }
@@ -890,7 +890,7 @@ With an existing collection, directly mint a soul bound token into the recipient
public entry fun burn<T: key>(creator: &signer, token: Object<T>) acquires AptosToken {
     let aptos_token = authorized_borrow(&token, creator);
     assert!(
-        option::is_some(&aptos_token.burn_ref),
+        aptos_token.burn_ref.is_some(),
         error::permission_denied(ETOKEN_NOT_BURNABLE),
     );
     move aptos_token;
@@ -902,7 +902,7 @@ With an existing collection, directly mint a soul bound token into the recipient
         property_mutator_ref,
     } = aptos_token;
     property_map::burn(property_mutator_ref);
-    token::burn(option::extract(&mut burn_ref));
+    token::burn(burn_ref.extract());
 }
 
@@ -929,10 +929,10 @@ With an existing collection, directly mint a soul bound token into the recipient
     let aptos_token = authorized_borrow(&token, creator);
     assert!(
         are_collection_tokens_freezable(token::collection_object(token))
-            && option::is_some(&aptos_token.transfer_ref),
+            && aptos_token.transfer_ref.is_some(),
         error::permission_denied(EFIELD_NOT_MUTABLE),
     );
-    object::disable_ungated_transfer(option::borrow(&aptos_token.transfer_ref));
+    object::disable_ungated_transfer(aptos_token.transfer_ref.borrow());
 }
@@ -962,10 +962,10 @@ With an existing collection, directly mint a soul bound token into the recipient
     let aptos_token = authorized_borrow(&token, creator);
     assert!(
         are_collection_tokens_freezable(token::collection_object(token))
-            && option::is_some(&aptos_token.transfer_ref),
+            && aptos_token.transfer_ref.is_some(),
         error::permission_denied(EFIELD_NOT_MUTABLE),
     );
-    object::enable_ungated_transfer(option::borrow(&aptos_token.transfer_ref));
+    object::enable_ungated_transfer(aptos_token.transfer_ref.borrow());
 }
@@ -998,7 +998,7 @@ With an existing collection, directly mint a soul bound token into the recipient
         error::permission_denied(EFIELD_NOT_MUTABLE),
     );
     let aptos_token = authorized_borrow(&token, creator);
-    token::set_description(option::borrow(&aptos_token.mutator_ref), description);
+    token::set_description(aptos_token.mutator_ref.borrow(), description);
 }
@@ -1031,7 +1031,7 @@ With an existing collection, directly mint a soul bound token into the recipient
         error::permission_denied(EFIELD_NOT_MUTABLE),
     );
     let aptos_token = authorized_borrow(&token, creator);
-    token::set_name(option::borrow(&aptos_token.mutator_ref), name);
+    token::set_name(aptos_token.mutator_ref.borrow(), name);
 }
@@ -1064,7 +1064,7 @@ With an existing collection, directly mint a soul bound token into the recipient
         error::permission_denied(EFIELD_NOT_MUTABLE),
     );
     let aptos_token = authorized_borrow(&token, creator);
-    token::set_uri(option::borrow(&aptos_token.mutator_ref), uri);
+    token::set_uri(aptos_token.mutator_ref.borrow(), uri);
 }
@@ -1294,7 +1294,7 @@ With an existing collection, directly mint a soul bound token into the recipient
         exists<AptosCollection>(collection_address),
         error::not_found(ECOLLECTION_DOES_NOT_EXIST),
     );
-    borrow_global<AptosCollection>(collection_address)
+    &AptosCollection[collection_address]
 }
@@ -1346,7 +1346,7 @@ With an existing collection, directly mint a soul bound token into the recipient
public fun is_mutable_collection_royalty<T: key>(
     collection: Object<T>,
 ): bool acquires AptosCollection {
-    option::is_some(&borrow_collection(&collection).royalty_mutator_ref)
+    borrow_collection(&collection).royalty_mutator_ref.is_some()
 }
 
@@ -1561,7 +1561,7 @@ With an existing collection, directly mint a soul bound token into the recipient
         collection::creator(*collection) == signer::address_of(creator),
         error::permission_denied(ENOT_CREATOR),
     );
-    borrow_global<AptosCollection>(collection_address)
+    &AptosCollection[collection_address]
 }
@@ -1594,7 +1594,7 @@ With an existing collection, directly mint a soul bound token into the recipient
         aptos_collection.mutable_description,
         error::permission_denied(EFIELD_NOT_MUTABLE),
     );
-    collection::set_description(option::borrow(&aptos_collection.mutator_ref), description);
+    collection::set_description(aptos_collection.mutator_ref.borrow(), description);
 }
@@ -1624,10 +1624,10 @@ With an existing collection, directly mint a soul bound token into the recipient
 ) acquires AptosCollection {
     let aptos_collection = authorized_borrow_collection(&collection, creator);
     assert!(
-        option::is_some(&aptos_collection.royalty_mutator_ref),
+        aptos_collection.royalty_mutator_ref.is_some(),
         error::permission_denied(EFIELD_NOT_MUTABLE),
     );
-    royalty::update(option::borrow(&aptos_collection.royalty_mutator_ref), royalty);
+    royalty::update(aptos_collection.royalty_mutator_ref.borrow(), royalty);
 }
@@ -1691,7 +1691,7 @@ With an existing collection, directly mint a soul bound token into the recipient
         aptos_collection.mutable_uri,
         error::permission_denied(EFIELD_NOT_MUTABLE),
     );
-    collection::set_uri(option::borrow(&aptos_collection.mutator_ref), uri);
+    collection::set_uri(aptos_collection.mutator_ref.borrow(), uri);
 }
diff --git a/aptos-move/framework/aptos-token-objects/doc/collection.md b/aptos-move/framework/aptos-token-objects/doc/collection.md index 8c992d2badc36..4e36b6b0ac4b0 100644 --- a/aptos-move/framework/aptos-token-objects/doc/collection.md +++ b/aptos-move/framework/aptos-token-objects/doc/collection.md @@ -39,9 +39,12 @@ require adding the field original_name. - [Struct `SetMaxSupply`](#0x4_collection_SetMaxSupply) - [Constants](#@Constants_0) - [Function `create_fixed_collection`](#0x4_collection_create_fixed_collection) +- [Function `create_fixed_collection_as_owner`](#0x4_collection_create_fixed_collection_as_owner) - [Function `create_unlimited_collection`](#0x4_collection_create_unlimited_collection) +- [Function `create_unlimited_collection_as_owner`](#0x4_collection_create_unlimited_collection_as_owner) - [Function `create_untracked_collection`](#0x4_collection_create_untracked_collection) - [Function `create_collection_internal`](#0x4_collection_create_collection_internal) +- [Function `enable_ungated_transfer`](#0x4_collection_enable_ungated_transfer) - [Function `create_collection_address`](#0x4_collection_create_collection_address) - [Function `create_collection_seed`](#0x4_collection_create_collection_seed) - [Function `increment_supply`](#0x4_collection_increment_supply) @@ -60,6 +63,8 @@ require adding the field original_name. - [Function `set_description`](#0x4_collection_set_description) - [Function `set_uri`](#0x4_collection_set_uri) - [Function `set_max_supply`](#0x4_collection_set_max_supply) +- [Specification](#@Specification_1) + - [Function `increment_supply`](#@Specification_1_increment_supply)
use 0x1::aggregator_v2;
@@ -708,6 +713,16 @@ The collection name is over the maximum length
 
 
 
+
+
+The collection owner feature is not supported
+
+
+
const ECOLLECTION_OWNER_NOT_SUPPORTED: u64 = 11;
+
+ + + The collection has reached its supply and no more tokens can be minted, unless some are burned @@ -837,6 +852,51 @@ Beyond that, it adds supply tracking with events. + + + + +## Function `create_fixed_collection_as_owner` + +Same functionality as create_fixed_collection, but the caller is the owner of the collection. +This means that the caller can transfer the collection to another address. +This transfers ownership and minting permissions to the new address. + + +
public fun create_fixed_collection_as_owner(creator: &signer, description: string::String, max_supply: u64, name: string::String, royalty: option::Option<royalty::Royalty>, uri: string::String): object::ConstructorRef
+
+ + + +
+Implementation + + +
public fun create_fixed_collection_as_owner(
+    creator: &signer,
+    description: String,
+    max_supply: u64,
+    name: String,
+    royalty: Option<Royalty>,
+    uri: String,
+): ConstructorRef {
+    assert!(features::is_collection_owner_enabled(), error::unavailable(ECOLLECTION_OWNER_NOT_SUPPORTED));
+
+    let constructor_ref = create_fixed_collection(
+        creator,
+        description,
+        max_supply,
+        name,
+        royalty,
+        uri,
+    );
+    enable_ungated_transfer(&constructor_ref);
+    constructor_ref
+}
+
+ + +
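
A minimal usage sketch for the owner-enabled variant. The module name `my_addr::collection_demo` and all constructor arguments are illustrative only and not part of this change:

```move
module my_addr::collection_demo {
    use std::option;
    use std::string;
    use aptos_token_objects::collection;

    // Creates a fixed-size collection whose object can later be transferred,
    // because create_fixed_collection_as_owner enables ungated transfer on it.
    // Note: per the assertion above, this aborts unless the collection-owner
    // feature flag is enabled on chain.
    fun create_demo_collection(creator: &signer) {
        collection::create_fixed_collection_as_owner(
            creator,
            string::utf8(b"An example collection"),
            100,                      // max_supply
            string::utf8(b"Example"),
            option::none(),           // no royalty
            string::utf8(b"https://example.com/collection.json"),
        );
    }
}
```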
@@ -885,6 +945,49 @@ the supply of tokens. + + + + +## Function `create_unlimited_collection_as_owner` + +Same functionality as create_unlimited_collection, but the caller is the owner of the collection. +This means that the caller can transfer the collection to another address. +This transfers ownership and minting permissions to the new address. + + +
public fun create_unlimited_collection_as_owner(creator: &signer, description: string::String, name: string::String, royalty: option::Option<royalty::Royalty>, uri: string::String): object::ConstructorRef
+
+ + + +
+Implementation + + +
public fun create_unlimited_collection_as_owner(
+    creator: &signer,
+    description: String,
+    name: String,
+    royalty: Option<Royalty>,
+    uri: String,
+): ConstructorRef {
+    assert!(features::is_collection_owner_enabled(), error::unavailable(ECOLLECTION_OWNER_NOT_SUPPORTED));
+
+    let constructor_ref = create_unlimited_collection(
+        creator,
+        description,
+        name,
+        royalty,
+        uri,
+    );
+    enable_ungated_transfer(&constructor_ref);
+    constructor_ref
+}
+
+ + +
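
Because the owner variants leave ungated transfer enabled on the collection object, the creator can later hand the collection (and, per the doc comment above, its minting permissions) to another account. A sketch under the same hypothetical module assumptions as the previous example:

```move
module my_addr::collection_transfer_demo {
    use std::option;
    use std::string;
    use aptos_framework::object;
    use aptos_token_objects::collection::{Self, Collection};

    // Create an unlimited collection as owner, then transfer the collection
    // object itself to `new_owner`.
    fun create_and_hand_over(creator: &signer, new_owner: address) {
        let constructor_ref = collection::create_unlimited_collection_as_owner(
            creator,
            string::utf8(b"An example collection"),
            string::utf8(b"Example"),
            option::none(),
            string::utf8(b"https://example.com/collection.json"),
        );
        let collection_obj = object::object_from_constructor_ref<Collection>(&constructor_ref);
        object::transfer(creator, collection_obj, new_owner);
    }
}
```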
@@ -955,9 +1058,9 @@ TODO: Hide this until we bring back meaningful way to enforce burns uri: String, supply: Option<Supply>, ): ConstructorRef { - assert!(string::length(&name) <= MAX_COLLECTION_NAME_LENGTH, error::out_of_range(ECOLLECTION_NAME_TOO_LONG)); - assert!(string::length(&uri) <= MAX_URI_LENGTH, error::out_of_range(EURI_TOO_LONG)); - assert!(string::length(&description) <= MAX_DESCRIPTION_LENGTH, error::out_of_range(EDESCRIPTION_TOO_LONG)); + assert!(name.length() <= MAX_COLLECTION_NAME_LENGTH, error::out_of_range(ECOLLECTION_NAME_TOO_LONG)); + assert!(uri.length() <= MAX_URI_LENGTH, error::out_of_range(EURI_TOO_LONG)); + assert!(description.length() <= MAX_DESCRIPTION_LENGTH, error::out_of_range(EDESCRIPTION_TOO_LONG)); let object_signer = object::generate_signer(&constructor_ref); @@ -970,14 +1073,14 @@ TODO: Hide this until we bring back meaningful way to enforce burns }; move_to(&object_signer, collection); - if (option::is_some(&supply)) { - move_to(&object_signer, option::destroy_some(supply)) + if (supply.is_some()) { + move_to(&object_signer, supply.destroy_some()) } else { - option::destroy_none(supply) + supply.destroy_none() }; - if (option::is_some(&royalty)) { - royalty::init(&constructor_ref, option::extract(&mut royalty)) + if (royalty.is_some()) { + royalty::init(&constructor_ref, royalty.extract()) }; let transfer_ref = object::generate_transfer_ref(&constructor_ref); @@ -989,6 +1092,31 @@ TODO: Hide this until we bring back meaningful way to enforce burns + + + + +## Function `enable_ungated_transfer` + + + +
fun enable_ungated_transfer(constructor_ref: &object::ConstructorRef)
+
+ + + +
+Implementation + + +
inline fun enable_ungated_transfer(constructor_ref: &ConstructorRef) {
+    let transfer_ref = object::generate_transfer_ref(constructor_ref);
+    object::enable_ungated_transfer(&transfer_ref);
+}
+
+ + +
@@ -1033,8 +1161,8 @@ Named objects are derived from a seed, the collection's seed is its name.
public fun create_collection_seed(name: &String): vector<u8> {
-    assert!(string::length(name) <= MAX_COLLECTION_NAME_LENGTH, error::out_of_range(ECOLLECTION_NAME_TOO_LONG));
-    *string::bytes(name)
+    assert!(name.length() <= MAX_COLLECTION_NAME_LENGTH, error::out_of_range(ECOLLECTION_NAME_TOO_LONG));
+    *name.bytes()
 }
 
@@ -1058,13 +1186,13 @@ Called by token on mint to increment supply if there's an appropriate Supply str
 Implementation
 
-public(friend) fun increment_supply(
+friend fun increment_supply(
     collection: &Object<Collection>,
     token: address,
 ): Option<AggregatorSnapshot<u64>> acquires FixedSupply, UnlimitedSupply, ConcurrentSupply {
     let collection_addr = object::object_address(collection);
     if (exists<ConcurrentSupply>(collection_addr)) {
-        let supply = borrow_global_mut<ConcurrentSupply>(collection_addr);
+        let supply = &mut ConcurrentSupply[collection_addr];
         assert!(
             aggregator_v2::try_add(&mut supply.current_supply, 1),
             error::out_of_range(ECOLLECTION_SUPPLY_EXCEEDED),
@@ -1079,9 +1207,9 @@ Called by token on mint to increment supply if there's an appropriate Supply str
         );
         option::some(aggregator_v2::snapshot(&supply.total_minted))
     } else if (exists<FixedSupply>(collection_addr)) {
-        let supply = borrow_global_mut<FixedSupply>(collection_addr);
-        supply.current_supply = supply.current_supply + 1;
-        supply.total_minted = supply.total_minted + 1;
+        let supply = &mut FixedSupply[collection_addr];
+        supply.current_supply += 1;
+        supply.total_minted += 1;
         assert!(
             supply.current_supply <= supply.max_supply,
             error::out_of_range(ECOLLECTION_SUPPLY_EXCEEDED),
@@ -1094,18 +1222,19 @@ Called by token on mint to increment supply if there's an appropriate Supply str
                     token,
                 },
             );
+        } else {
+            event::emit_event(&mut supply.mint_events,
+                MintEvent {
+                    index: supply.total_minted,
+                    token,
+                },
+            );
         };
-        event::emit_event(&mut supply.mint_events,
-            MintEvent {
-                index: supply.total_minted,
-                token,
-            },
-        );
         option::some(aggregator_v2::create_snapshot<u64>(supply.total_minted))
     } else if (exists<UnlimitedSupply>(collection_addr)) {
-        let supply = borrow_global_mut<UnlimitedSupply>(collection_addr);
-        supply.current_supply = supply.current_supply + 1;
-        supply.total_minted = supply.total_minted + 1;
+        let supply = &mut UnlimitedSupply[collection_addr];
+        supply.current_supply += 1;
+        supply.total_minted += 1;
         if (std::features::module_event_migration_enabled()) {
             event::emit(
                 Mint {
@@ -1114,14 +1243,15 @@ Called by token on mint to increment supply if there's an appropriate Supply str
                     token,
                 },
             );
+        } else {
+            event::emit_event(
+                &mut supply.mint_events,
+                MintEvent {
+                    index: supply.total_minted,
+                    token,
+                },
+            );
         };
-        event::emit_event(
-            &mut supply.mint_events,
-            MintEvent {
-                index: supply.total_minted,
-                token,
-            },
-        );
         option::some(aggregator_v2::create_snapshot<u64>(supply.total_minted))
     } else {
         option::none()
@@ -1149,7 +1279,7 @@ Called by token on burn to decrement supply if there's an appropriate Supply str
 Implementation
 
 
-public(friend) fun decrement_supply(
+friend fun decrement_supply(
     collection: &Object<Collection>,
     token: address,
     index: Option<u64>,
@@ -1157,57 +1287,59 @@ Called by token on burn to decrement supply if there's an appropriate Supply str
 ) acquires FixedSupply, UnlimitedSupply, ConcurrentSupply {
     let collection_addr = object::object_address(collection);
     if (exists<ConcurrentSupply>(collection_addr)) {
-        let supply = borrow_global_mut<ConcurrentSupply>(collection_addr);
+        let supply = &mut ConcurrentSupply[collection_addr];
         aggregator_v2::sub(&mut supply.current_supply, 1);
 
         event::emit(
             Burn {
                 collection: collection_addr,
-                index: *option::borrow(&index),
+                index: *index.borrow(),
                 token,
                 previous_owner,
             },
         );
     } else if (exists<FixedSupply>(collection_addr)) {
-        let supply = borrow_global_mut<FixedSupply>(collection_addr);
-        supply.current_supply = supply.current_supply - 1;
+        let supply = &mut FixedSupply[collection_addr];
+        supply.current_supply -= 1;
         if (std::features::module_event_migration_enabled()) {
             event::emit(
                 Burn {
                     collection: collection_addr,
-                    index: *option::borrow(&index),
+                    index: *index.borrow(),
                     token,
                     previous_owner,
                 },
             );
+        } else {
+            event::emit_event(
+                &mut supply.burn_events,
+                BurnEvent {
+                    index: *index.borrow(),
+                    token,
+                },
+            );
         };
-        event::emit_event(
-            &mut supply.burn_events,
-            BurnEvent {
-                index: *option::borrow(&index),
-                token,
-            },
-        );
     } else if (exists<UnlimitedSupply>(collection_addr)) {
-        let supply = borrow_global_mut<UnlimitedSupply>(collection_addr);
-        supply.current_supply = supply.current_supply - 1;
+        let supply = &mut UnlimitedSupply[collection_addr];
+        supply.current_supply -= 1;
         if (std::features::module_event_migration_enabled()) {
             event::emit(
                 Burn {
                     collection: collection_addr,
-                    index: *option::borrow(&index),
+                    index: *index.borrow(),
                     token,
                     previous_owner,
                 },
             );
+        } else {
+            event::emit_event(
+                &mut supply.burn_events,
+                BurnEvent {
+                    index: *index.borrow(),
+                    token,
+                },
+            );
         };
-        event::emit_event(
-            &mut supply.burn_events,
-            BurnEvent {
-                index: *option::borrow(&index),
-                token,
-            },
-        );
     }
 }
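
The pattern applied throughout these hunks is the same: when the `module_event_migration_enabled` feature is on, only the new module event is emitted; otherwise the legacy handle-based event is kept. A stripped-down sketch of that gating, with a hypothetical `Example` event and handle that are not part of this change:

```move
module my_addr::event_migration_demo {
    use aptos_framework::event;

    #[event]
    struct Example has drop, store { value: u64 }

    struct Handles has key {
        example_events: event::EventHandle<Example>,
    }

    fun notify(handles: &mut Handles, value: u64) {
        if (std::features::module_event_migration_enabled()) {
            // New-style module event, no handle required.
            event::emit(Example { value });
        } else {
            // Legacy event tied to an EventHandle stored in a resource.
            event::emit_event(&mut handles.example_events, Example { value });
        };
    }
}
```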
 
@@ -1356,7 +1488,7 @@ Creates a MutatorRef, which gates the ability to mutate any fields that support
inline fun borrow<T: key>(collection: &Object<T>): &Collection {
     let collection_address = object::object_address(collection);
     check_collection_exists(collection_address);
-    borrow_global<Collection>(collection_address)
+    &Collection[collection_address]
 }
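
Most of the mechanical changes in this file swap `borrow_global`/`borrow_global_mut` for the newer resource-indexing syntax. A small, self-contained sketch of the correspondence, using a hypothetical `Counter` resource rather than anything from this diff:

```move
module my_addr::indexing_demo {
    struct Counter has key { value: u64 }

    fun bump(addr: address) acquires Counter {
        // Equivalent to: let counter = borrow_global_mut<Counter>(addr);
        let counter = &mut Counter[addr];
        // Compound assignment, also adopted throughout this change.
        counter.value += 1;
    }

    fun read(addr: address): u64 acquires Counter {
        // Equivalent to: borrow_global<Counter>(addr).value
        let counter = &Counter[addr];
        counter.value
    }
}
```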
 
@@ -1391,13 +1523,13 @@ it from being parallelized.
     check_collection_exists(collection_address);
     if (exists<ConcurrentSupply>(collection_address)) {
-        let supply = borrow_global_mut<ConcurrentSupply>(collection_address);
+        let supply = &ConcurrentSupply[collection_address];
         option::some(aggregator_v2::read(&supply.current_supply))
     } else if (exists<FixedSupply>(collection_address)) {
-        let supply = borrow_global_mut<FixedSupply>(collection_address);
+        let supply = &FixedSupply[collection_address];
         option::some(supply.current_supply)
     } else if (exists<UnlimitedSupply>(collection_address)) {
-        let supply = borrow_global_mut<UnlimitedSupply>(collection_address);
+        let supply = &UnlimitedSupply[collection_address];
         option::some(supply.current_supply)
     } else {
         option::none()
@@ -1526,7 +1658,7 @@ it from being parallelized.
inline fun borrow_mut(mutator_ref: &MutatorRef): &mut Collection {
     check_collection_exists(mutator_ref.self);
-    borrow_global_mut<Collection>(mutator_ref.self)
+    &mut Collection[mutator_ref.self]
 }
 
@@ -1556,7 +1688,7 @@ After changing the collection's name, to create tokens - only call functions tha
public fun set_name(mutator_ref: &MutatorRef, name: String) acquires Collection {
-    assert!(string::length(&name) <= MAX_COLLECTION_NAME_LENGTH, error::out_of_range(ECOLLECTION_NAME_TOO_LONG));
+    assert!(name.length() <= MAX_COLLECTION_NAME_LENGTH, error::out_of_range(ECOLLECTION_NAME_TOO_LONG));
     let collection = borrow_mut(mutator_ref);
     event::emit(Mutation {
         mutated_field_name: string::utf8(b"name") ,
@@ -1588,7 +1720,7 @@ After changing the collection's name, to create tokens - only call functions tha
 
 
 
public fun set_description(mutator_ref: &MutatorRef, description: String) acquires Collection {
-    assert!(string::length(&description) <= MAX_DESCRIPTION_LENGTH, error::out_of_range(EDESCRIPTION_TOO_LONG));
+    assert!(description.length() <= MAX_DESCRIPTION_LENGTH, error::out_of_range(EDESCRIPTION_TOO_LONG));
     let collection = borrow_mut(mutator_ref);
     if (std::features::module_event_migration_enabled()) {
         event::emit(Mutation {
@@ -1597,12 +1729,13 @@ After changing the collection's name, to create tokens - only call functions tha
             old_value: collection.description,
             new_value: description,
         });
+    } else {
+        event::emit_event(
+            &mut collection.mutation_events,
+            MutationEvent { mutated_field_name: string::utf8(b"description") },
+        );
     };
     collection.description = description;
-    event::emit_event(
-        &mut collection.mutation_events,
-        MutationEvent { mutated_field_name: string::utf8(b"description") },
-    );
 }
 
@@ -1626,7 +1759,7 @@ After changing the collection's name, to create tokens - only call functions tha
public fun set_uri(mutator_ref: &MutatorRef, uri: String) acquires Collection {
-    assert!(string::length(&uri) <= MAX_URI_LENGTH, error::out_of_range(EURI_TOO_LONG));
+    assert!(uri.length() <= MAX_URI_LENGTH, error::out_of_range(EURI_TOO_LONG));
     let collection = borrow_mut(mutator_ref);
     if (std::features::module_event_migration_enabled()) {
         event::emit(Mutation {
@@ -1635,12 +1768,13 @@ After changing the collection's name, to create tokens - only call functions tha
             old_value: collection.uri,
             new_value: uri,
         });
+    } else {
+        event::emit_event(
+            &mut collection.mutation_events,
+            MutationEvent { mutated_field_name: string::utf8(b"uri") },
+        );
     };
     collection.uri = uri;
-    event::emit_event(
-        &mut collection.mutation_events,
-        MutationEvent { mutated_field_name: string::utf8(b"uri") },
-    );
 }
 
@@ -1669,7 +1803,7 @@ After changing the collection's name, to create tokens - only call functions tha let old_max_supply; if (exists<ConcurrentSupply>(collection_address)) { - let supply = borrow_global_mut<ConcurrentSupply>(collection_address); + let supply = &mut ConcurrentSupply[collection_address]; let current_supply = aggregator_v2::read(&supply.current_supply); assert!( max_supply >= current_supply, @@ -1679,7 +1813,7 @@ After changing the collection's name, to create tokens - only call functions tha supply.current_supply = aggregator_v2::create_aggregator(max_supply); aggregator_v2::add(&mut supply.current_supply, current_supply); } else if (exists<FixedSupply>(collection_address)) { - let supply = borrow_global_mut<FixedSupply>(collection_address); + let supply = &mut FixedSupply[collection_address]; assert!( max_supply >= supply.current_supply, error::out_of_range(EINVALID_MAX_SUPPLY), @@ -1698,5 +1832,41 @@ After changing the collection's name, to create tokens - only call functions tha + + +## Specification + + + + +### Function `increment_supply` + + +
public(friend) fun increment_supply(collection: &object::Object<collection::Collection>, token: address): option::Option<aggregator_v2::AggregatorSnapshot<u64>>
+
+ + + + +
pragma aborts_if_is_partial;
+let collection_addr = object::object_address(collection);
+let supply = global<ConcurrentSupply>(collection_addr);
+let post supply_post = global<ConcurrentSupply>(collection_addr);
+aborts_if exists<ConcurrentSupply>(collection_addr) &&
+    aggregator_v2::spec_get_value(supply.current_supply) + 1
+        > aggregator_v2::spec_get_max_value(supply.current_supply);
+aborts_if exists<ConcurrentSupply>(collection_addr) &&
+    aggregator_v2::spec_get_value(supply.total_minted) + 1
+        > aggregator_v2::spec_get_max_value(supply.total_minted);
+ensures
+    aggregator_v2::spec_get_max_value(supply.current_supply)
+        == aggregator_v2::spec_get_max_value(supply_post.current_supply);
+ensures exists<ConcurrentSupply>(collection_addr) &&
+    aggregator_v2::spec_get_value(supply.current_supply) + 1
+        <= aggregator_v2::spec_get_max_value(supply.current_supply) ==>
+    aggregator_v2::spec_get_value(supply.current_supply) + 1
+        == aggregator_v2::spec_get_value(supply_post.current_supply);
+
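
The partial spec above mirrors the runtime behaviour of `aggregator_v2::try_add`, on which the implementation asserts: the add succeeds only while the aggregator stays within its configured maximum. A hedged sketch of that runtime counterpart, using a hypothetical wrapper module that is not part of this change:

```move
module my_addr::supply_demo {
    use aptos_framework::aggregator_v2::{Self, Aggregator};

    // Returns true and bumps the counter when the current value + 1 still fits
    // under the aggregator's max value; returns false and leaves it unchanged
    // otherwise. increment_supply asserts on this result and aborts with
    // ECOLLECTION_SUPPLY_EXCEEDED when it is false.
    fun try_bump(current_supply: &mut Aggregator<u64>): bool {
        aggregator_v2::try_add(current_supply, 1)
    }
}
```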
+ [move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/aptos-token-objects/doc/property_map.md b/aptos-move/framework/aptos-token-objects/doc/property_map.md index eec8f62dc627e..d2ac66e4d885e 100644 --- a/aptos-move/framework/aptos-token-objects/doc/property_map.md +++ b/aptos-move/framework/aptos-token-objects/doc/property_map.md @@ -441,26 +441,26 @@ Helper for external entry functions to produce a valid container for property va types: vector<String>, values: vector<vector<u8>>, ): PropertyMap { - let length = vector::length(&keys); + let length = keys.length(); assert!(length <= MAX_PROPERTY_MAP_SIZE, error::invalid_argument(ETOO_MANY_PROPERTIES)); - assert!(length == vector::length(&values), error::invalid_argument(EKEY_VALUE_COUNT_MISMATCH)); - assert!(length == vector::length(&types), error::invalid_argument(EKEY_TYPE_COUNT_MISMATCH)); + assert!(length == values.length(), error::invalid_argument(EKEY_VALUE_COUNT_MISMATCH)); + assert!(length == types.length(), error::invalid_argument(EKEY_TYPE_COUNT_MISMATCH)); let container = simple_map::create<String, PropertyValue>(); - while (!vector::is_empty(&keys)) { - let key = vector::pop_back(&mut keys); + while (!keys.is_empty()) { + let key = keys.pop_back(); assert!( - string::length(&key) <= MAX_PROPERTY_NAME_LENGTH, + key.length() <= MAX_PROPERTY_NAME_LENGTH, error::invalid_argument(EPROPERTY_MAP_KEY_TOO_LONG), ); - let value = vector::pop_back(&mut values); - let type = vector::pop_back(&mut types); + let value = values.pop_back(); + let type = types.pop_back(); let new_type = to_internal_type(type); validate_type(new_type, value); - simple_map::add(&mut container, key, PropertyValue { value, type: new_type }); + container.add(key, PropertyValue { value, type: new_type }); }; PropertyMap { inner: container } @@ -679,8 +679,8 @@ Validates property value type against its expected type
public fun contains_key<T: key>(object: &Object<T>, key: &String): bool acquires PropertyMap {
     assert_exists(object::object_address(object));
-    let property_map = borrow_global<PropertyMap>(object::object_address(object));
-    simple_map::contains_key(&property_map.inner, key)
+    let property_map = &PropertyMap[object::object_address(object)];
+    property_map.inner.contains_key(key)
 }
 
@@ -705,8 +705,8 @@ Validates property value type against its expected type
public fun length<T: key>(object: &Object<T>): u64 acquires PropertyMap {
     assert_exists(object::object_address(object));
-    let property_map = borrow_global<PropertyMap>(object::object_address(object));
-    simple_map::length(&property_map.inner)
+    let property_map = &PropertyMap[object::object_address(object)];
+    property_map.inner.length()
 }
 
@@ -734,8 +734,8 @@ The preferred method is to use read_<type> where the type is
public fun read<T: key>(object: &Object<T>, key: &String): (String, vector<u8>) acquires PropertyMap {
     assert_exists(object::object_address(object));
-    let property_map = borrow_global<PropertyMap>(object::object_address(object));
-    let property_value = simple_map::borrow(&property_map.inner, key);
+    let property_map = &PropertyMap[object::object_address(object)];
+    let property_value = property_map.inner.borrow(key);
     let new_type = to_external_type(property_value.type);
     (new_type, property_value.value)
 }
@@ -1122,8 +1122,8 @@ Add a property that isn't already encoded as a add_internal(ref: &MutatorRef, key: String, type: u8, value: vector<u8>) acquires PropertyMap {
     assert_exists(ref.self);
-    let property_map = borrow_global_mut<PropertyMap>(ref.self);
-    simple_map::add(&mut property_map.inner, key, PropertyValue { type, value });
+    let property_map = &mut PropertyMap[ref.self];
+    property_map.inner.add(key, PropertyValue { type, value });
 }
 
@@ -1201,8 +1201,8 @@ Updates a property in place that is not already bcs encoded
inline fun update_internal(ref: &MutatorRef, key: &String, type: u8, value: vector<u8>) acquires PropertyMap {
     assert_exists(ref.self);
-    let property_map = borrow_global_mut<PropertyMap>(ref.self);
-    let old_value = simple_map::borrow_mut(&mut property_map.inner, key);
+    let property_map = &mut PropertyMap[ref.self];
+    let old_value = property_map.inner.borrow_mut(key);
     *old_value = PropertyValue { type, value };
 }
 
@@ -1229,8 +1229,8 @@ Removes a property from the map, ensuring that it does in fact exist
public fun remove(ref: &MutatorRef, key: &String) acquires PropertyMap {
     assert_exists(ref.self);
-    let property_map = borrow_global_mut<PropertyMap>(ref.self);
-    simple_map::remove(&mut property_map.inner, key);
+    let property_map = &mut PropertyMap[ref.self];
+    property_map.inner.remove(key);
 }
 
diff --git a/aptos-move/framework/aptos-token-objects/doc/royalty.md b/aptos-move/framework/aptos-token-objects/doc/royalty.md
index 84cabeecbfaef..e5236c1bd40ac 100644
--- a/aptos-move/framework/aptos-token-objects/doc/royalty.md
+++ b/aptos-move/framework/aptos-token-objects/doc/royalty.md
@@ -286,7 +286,7 @@ Creates a new royalty, verifying that it is a valid percentage
 Implementation
 
-public(friend) fun delete(addr: address) acquires Royalty {
+friend fun delete(addr: address) acquires Royalty {
     assert!(exists<Royalty>(addr), error::not_found(EROYALTY_DOES_NOT_EXIST));
     move_from<Royalty>(addr);
 }
@@ -314,7 +314,7 @@ Creates a new royalty, verifying that it is a valid percentage
 
public fun get<T: key>(maybe_royalty: Object<T>): Option<Royalty> acquires Royalty {
     let obj_addr = object::object_address(&maybe_royalty);
     if (exists<Royalty>(obj_addr)) {
-        option::some(*borrow_global<Royalty>(obj_addr))
+        option::some(Royalty[obj_addr])
     } else {
         option::none()
     }
diff --git a/aptos-move/framework/aptos-token-objects/doc/token.md b/aptos-move/framework/aptos-token-objects/doc/token.md
index b4c53eb85ef63..17ae275c69527 100644
--- a/aptos-move/framework/aptos-token-objects/doc/token.md
+++ b/aptos-move/framework/aptos-token-objects/doc/token.md
@@ -20,13 +20,19 @@ token are:
 -  [Constants](#@Constants_0)
 -  [Function `create_common`](#0x4_token_create_common)
 -  [Function `create_common_with_collection`](#0x4_token_create_common_with_collection)
+-  [Function `create_common_with_collection_as_owner`](#0x4_token_create_common_with_collection_as_owner)
+-  [Function `create_common_with_collection_internal`](#0x4_token_create_common_with_collection_internal)
 -  [Function `create_token`](#0x4_token_create_token)
 -  [Function `create`](#0x4_token_create)
+-  [Function `create_token_as_collection_owner`](#0x4_token_create_token_as_collection_owner)
 -  [Function `create_numbered_token_object`](#0x4_token_create_numbered_token_object)
 -  [Function `create_numbered_token`](#0x4_token_create_numbered_token)
+-  [Function `create_numbered_token_as_collection_owner`](#0x4_token_create_numbered_token_as_collection_owner)
 -  [Function `create_named_token_object`](#0x4_token_create_named_token_object)
 -  [Function `create_named_token`](#0x4_token_create_named_token)
+-  [Function `create_named_token_as_collection_owner`](#0x4_token_create_named_token_as_collection_owner)
 -  [Function `create_named_token_from_seed`](#0x4_token_create_named_token_from_seed)
+-  [Function `create_named_token_from_seed_as_collection_owner`](#0x4_token_create_named_token_from_seed_as_collection_owner)
 -  [Function `create_from_account`](#0x4_token_create_from_account)
 -  [Function `create_token_address`](#0x4_token_create_token_address)
 -  [Function `create_token_address_with_seed`](#0x4_token_create_token_address_with_seed)
@@ -380,6 +386,26 @@ The URI is over the maximum length
 
 
 
+
+
+The calling signer is not the owner
+
+
+
const ENOT_OWNER: u64 = 8;
+
+
+
+
+
+
+The collection owner feature is not supported
+
+
+
const ECOLLECTION_OWNER_NOT_SUPPORTED: u64 = 9;
+
+
+
+
 The description is over the maximum length
@@ -544,30 +570,115 @@ The token name is over the maximum length
 ) {
     assert!(collection::creator(collection) == signer::address_of(creator), error::unauthenticated(ENOT_CREATOR));
-    if (option::is_some(&name_with_index_suffix)) {
+    create_common_with_collection_internal(
+        constructor_ref,
+        collection,
+        description,
+        name_prefix,
+        name_with_index_suffix,
+        royalty,
+        uri
+    );
+}
+
+
+
+
+
+
+
+
+## Function `create_common_with_collection_as_owner`
+
+
+
fun create_common_with_collection_as_owner(owner: &signer, constructor_ref: &object::ConstructorRef, collection: object::Object<collection::Collection>, description: string::String, name_prefix: string::String, name_with_index_suffix: option::Option<string::String>, royalty: option::Option<royalty::Royalty>, uri: string::String)
+
+ + + +
+Implementation + + +
inline fun create_common_with_collection_as_owner(
+    owner: &signer,
+    constructor_ref: &ConstructorRef,
+    collection: Object<Collection>,
+    description: String,
+    name_prefix: String,
+    // If option::some, numbered token is created - i.e. index is appended to the name.
+    // If option::none, name_prefix is the full name of the token.
+    name_with_index_suffix: Option<String>,
+    royalty: Option<Royalty>,
+    uri: String,
+) {
+    assert!(features::is_collection_owner_enabled(), error::unavailable(ECOLLECTION_OWNER_NOT_SUPPORTED));
+    assert!(object::owner(collection) == signer::address_of(owner), error::unauthenticated(ENOT_OWNER));
+
+    create_common_with_collection_internal(
+        constructor_ref,
+        collection,
+        description,
+        name_prefix,
+        name_with_index_suffix,
+        royalty,
+        uri
+    );
+}
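The `*_as_collection_owner` paths introduced here abort with `error::unavailable(ECOLLECTION_OWNER_NOT_SUPPORTED)` unless the collection-owner feature flag is enabled. A test-only sketch of turning the flag on, reusing the feature helpers that the new collection tests in this change call (`0xcafe::feature_setup` is a hypothetical module name):

```move
#[test_only]
module 0xcafe::feature_setup {
    use std::features;

    /// Enable the collection-owner feature for a test; `aptos_framework` must be the @aptos_framework signer.
    public fun enable_collection_owner_feature(aptos_framework: &signer) {
        features::change_feature_flags_for_testing(
            aptos_framework,
            vector[features::get_collection_owner_feature()],
            vector[],
        );
    }
}
```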
+
+ + + +
+ + + +## Function `create_common_with_collection_internal` + + + +
fun create_common_with_collection_internal(constructor_ref: &object::ConstructorRef, collection: object::Object<collection::Collection>, description: string::String, name_prefix: string::String, name_with_index_suffix: option::Option<string::String>, royalty: option::Option<royalty::Royalty>, uri: string::String)
+
+ + + +
+Implementation + + +
inline fun create_common_with_collection_internal(
+    constructor_ref: &ConstructorRef,
+    collection: Object<Collection>,
+    description: String,
+    name_prefix: String,
+    // If option::some, numbered token is created - i.e. index is appended to the name.
+    // If option::none, name_prefix is the full name of the token.
+    name_with_index_suffix: Option<String>,
+    royalty: Option<Royalty>,
+    uri: String,
+) {
+    if (name_with_index_suffix.is_some()) {
         // Be conservative, as we don't know what length the index will be, and assume worst case (20 chars in MAX_U64)
         assert!(
-            string::length(&name_prefix) + 20 + string::length(
-                option::borrow(&name_with_index_suffix)
-            ) <= MAX_TOKEN_NAME_LENGTH,
+            name_prefix.length() + 20 + name_with_index_suffix.borrow().length() <= MAX_TOKEN_NAME_LENGTH,
             error::out_of_range(ETOKEN_NAME_TOO_LONG)
         );
     } else {
-        assert!(string::length(&name_prefix) <= MAX_TOKEN_NAME_LENGTH, error::out_of_range(ETOKEN_NAME_TOO_LONG));
+        assert!(name_prefix.length() <= MAX_TOKEN_NAME_LENGTH, error::out_of_range(ETOKEN_NAME_TOO_LONG));
     };
-    assert!(string::length(&description) <= MAX_DESCRIPTION_LENGTH, error::out_of_range(EDESCRIPTION_TOO_LONG));
-    assert!(string::length(&uri) <= MAX_URI_LENGTH, error::out_of_range(EURI_TOO_LONG));
+    assert!(description.length() <= MAX_DESCRIPTION_LENGTH, error::out_of_range(EDESCRIPTION_TOO_LONG));
+    assert!(uri.length() <= MAX_URI_LENGTH, error::out_of_range(EURI_TOO_LONG));
 
     let object_signer = object::generate_signer(constructor_ref);
 
-    let index = option::destroy_with_default(
-        collection::increment_supply(&collection, signer::address_of(&object_signer)),
+    let index = collection::increment_supply(&collection, signer::address_of(&object_signer)).destroy_with_default(
         aggregator_v2::create_snapshot<u64>(0)
     );
 
     // If create_numbered_token called us, add index to the name.
-    let name = if (option::is_some(&name_with_index_suffix)) {
-        aggregator_v2::derive_string_concat(name_prefix, &index, option::extract(&mut name_with_index_suffix))
+    let name = if (name_with_index_suffix.is_some()) {
+        aggregator_v2::derive_string_concat(name_prefix, &index, name_with_index_suffix.extract())
     } else {
         aggregator_v2::create_derived_string(name_prefix)
     };
@@ -591,8 +702,8 @@ The token name is over the maximum length
     };
     move_to(&object_signer, token);
 
-    if (option::is_some(&royalty)) {
-        royalty::init(constructor_ref, option::extract(&mut royalty))
+    if (royalty.is_some()) {
+        royalty::init(constructor_ref, royalty.extract())
     };
 }
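The 20 characters reserved above correspond to the decimal width of `MAX_U64` (18446744073709551615 is 20 digits), so with `MAX_TOKEN_NAME_LENGTH` at 128 the prefix and suffix of a numbered token may together use at most 108 characters. A small illustrative check (hypothetical test module, not part of this change):

```move
#[test_only]
module 0xcafe::name_budget_example {
    use std::string;

    #[test]
    fun index_reserves_twenty_characters() {
        // MAX_U64 printed in decimal is 20 characters long ...
        let max_u64_digits = string::utf8(b"18446744073709551615").length();
        assert!(max_u64_digits == 20, 0);
        // ... so with MAX_TOKEN_NAME_LENGTH == 128, prefix + suffix are limited to 108 characters.
        assert!(128 - max_u64_digits == 108, 1);
    }
}
```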
 
@@ -691,6 +802,50 @@ for additional specialization.
+
+
+
+
+
+## Function `create_token_as_collection_owner`
+
+Same functionality as create_token, but the token can only be created by the collection owner.
+
+
+
public fun create_token_as_collection_owner(creator: &signer, collection: object::Object<collection::Collection>, description: string::String, name: string::String, royalty: option::Option<royalty::Royalty>, uri: string::String): object::ConstructorRef
+
+ + + +
+Implementation + + +
public fun create_token_as_collection_owner(
+    creator: &signer,
+    collection: Object<Collection>,
+    description: String,
+    name: String,
+    royalty: Option<Royalty>,
+    uri: String,
+): ConstructorRef {
+    let creator_address = signer::address_of(creator);
+    let constructor_ref = object::create_object(creator_address);
+    create_common_with_collection_as_owner(
+        creator,
+        &constructor_ref,
+        collection,
+        description,
+        name,
+        option::none(),
+        royalty,
+        uri
+    );
+    constructor_ref
+}
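A minimal usage sketch of the owner-gated mint path (the module name, strings, and URI below are assumptions): the signer only needs to currently own the collection, for example after receiving it through an ungated transfer, and the collection-owner feature flag must be enabled on chain.

```move
module 0xcafe::owner_mint_example {
    use std::option;
    use std::string;
    use aptos_framework::object::{ConstructorRef, Object};
    use aptos_token_objects::collection::Collection;
    use aptos_token_objects::token;

    /// `owner` is whoever currently owns `collection`; it need not be the original creator.
    public fun mint_as_owner(owner: &signer, collection: Object<Collection>): ConstructorRef {
        token::create_token_as_collection_owner(
            owner,
            collection,
            string::utf8(b"example description"),
            string::utf8(b"example token"),
            option::none(),                                   // no royalty
            string::utf8(b"https://example.com/token.json"),
        )
    }
}
```

With the collection-owner feature off, the same call aborts with `ECOLLECTION_OWNER_NOT_SUPPORTED`.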
+
+ + +
@@ -792,6 +947,51 @@ while providing sequential names.
+
+
+
+
+## Function `create_numbered_token_as_collection_owner`
+
+Same functionality as create_numbered_token_object, but the token can only be created by the collection owner.
+
+
+
public fun create_numbered_token_as_collection_owner(creator: &signer, collection: object::Object<collection::Collection>, description: string::String, name_with_index_prefix: string::String, name_with_index_suffix: string::String, royalty: option::Option<royalty::Royalty>, uri: string::String): object::ConstructorRef
+
+ + + +
+Implementation + + +
public fun create_numbered_token_as_collection_owner(
+    creator: &signer,
+    collection: Object<Collection>,
+    description: String,
+    name_with_index_prefix: String,
+    name_with_index_suffix: String,
+    royalty: Option<Royalty>,
+    uri: String,
+): ConstructorRef {
+    let creator_address = signer::address_of(creator);
+    let constructor_ref = object::create_object(creator_address);
+    create_common_with_collection_as_owner(
+        creator,
+        &constructor_ref,
+        collection,
+        description,
+        name_with_index_prefix,
+        option::some(name_with_index_suffix),
+        royalty,
+        uri
+    );
+    constructor_ref
+}
+
+ + +
@@ -884,6 +1084,50 @@ additional specialization.
+
+
+
+
+## Function `create_named_token_as_collection_owner`
+
+Same functionality as create_named_token_object, but the token can only be created by the collection owner.
+
+
+
public fun create_named_token_as_collection_owner(creator: &signer, collection: object::Object<collection::Collection>, description: string::String, name: string::String, royalty: option::Option<royalty::Royalty>, uri: string::String): object::ConstructorRef
+
+ + + +
+Implementation + + +
public fun create_named_token_as_collection_owner(
+    creator: &signer,
+    collection: Object<Collection>,
+    description: String,
+    name: String,
+    royalty: Option<Royalty>,
+    uri: String,
+): ConstructorRef {
+    let seed = create_token_seed(&collection::name(collection), &name);
+    let constructor_ref = object::create_named_object(creator, seed);
+    create_common_with_collection_as_owner(
+        creator,
+        &constructor_ref,
+        collection,
+        description,
+        name,
+        option::none(),
+        royalty,
+        uri
+    );
+    constructor_ref
+}
+
+ + +
@@ -922,6 +1166,51 @@ This function must be called if the collection name has been previously changed.
+
+
+
+
+## Function `create_named_token_from_seed_as_collection_owner`
+
+Same functionality as create_named_token_from_seed, but the token can only be created by the collection owner.
+
+
+
public fun create_named_token_from_seed_as_collection_owner(creator: &signer, collection: object::Object<collection::Collection>, description: string::String, name: string::String, seed: string::String, royalty: option::Option<royalty::Royalty>, uri: string::String): object::ConstructorRef
+
+ + + +
+Implementation + + +
public fun create_named_token_from_seed_as_collection_owner(
+    creator: &signer,
+    collection: Object<Collection>,
+    description: String,
+    name: String,
+    seed: String,
+    royalty: Option<Royalty>,
+    uri: String,
+): ConstructorRef {
+    let seed = create_token_name_with_seed(&collection::name(collection), &name, &seed);
+    let constructor_ref = object::create_named_object(creator, seed);
+    create_common_with_collection_as_owner(
+        creator,
+        &constructor_ref,
+        collection,
+        description,
+        name,
+        option::none(),
+        royalty,
+        uri
+    );
+    constructor_ref
+}
+
+ + +
@@ -1040,10 +1329,10 @@ Named objects are derived from a seed, the token's seed is its name appended to
public fun create_token_seed(collection: &String, name: &String): vector<u8> {
-    assert!(string::length(name) <= MAX_TOKEN_NAME_LENGTH, error::out_of_range(ETOKEN_NAME_TOO_LONG));
-    let seed = *string::bytes(collection);
-    vector::append(&mut seed, b"::");
-    vector::append(&mut seed, *string::bytes(name));
+    assert!(name.length() <= MAX_TOKEN_NAME_LENGTH, error::out_of_range(ETOKEN_NAME_TOO_LONG));
+    let seed = *collection.bytes();
+    seed.append(b"::");
+    seed.append(*name.bytes());
     seed
 }
 
@@ -1068,9 +1357,9 @@ Named objects are derived from a seed, the token's seed is its name appended to
public fun create_token_name_with_seed(collection: &String, name: &String, seed: &String): vector<u8> {
-    assert!(string::length(seed) <= MAX_TOKEN_SEED_LENGTH, error::out_of_range(ESEED_TOO_LONG));
+    assert!(seed.length() <= MAX_TOKEN_SEED_LENGTH, error::out_of_range(ESEED_TOO_LONG));
     let seeds = create_token_seed(collection, name);
-    vector::append(&mut seeds, *string::bytes(seed));
+    seeds.append(*seed.bytes());
     seeds
 }
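For illustration, a sketch (with assumed collection and token names) of how a seed produced by `create_token_seed` feeds into named-object address derivation via `object::create_object_address`:

```move
module 0xcafe::address_example {
    use std::string;
    use aptos_framework::object;
    use aptos_token_objects::token;

    /// Address of the named token "my token" in "my collection" created by `creator`.
    public fun example_token_address(creator: address): address {
        // seed == b"my collection::my token"
        let seed = token::create_token_seed(
            &string::utf8(b"my collection"),
            &string::utf8(b"my token"),
        );
        object::create_object_address(&creator, seed)
    }
}
```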
 
@@ -1154,10 +1443,10 @@ Extracts the tokens address from a BurnRef.
public fun address_from_burn_ref(ref: &BurnRef): address {
-    if (option::is_some(&ref.inner)) {
-        object::address_from_delete_ref(option::borrow(&ref.inner))
+    if (ref.inner.is_some()) {
+        object::address_from_delete_ref(ref.inner.borrow())
     } else {
-        *option::borrow(&ref.self)
+        *ref.self.borrow()
     }
 }
 
@@ -1187,7 +1476,7 @@ Extracts the tokens address from a BurnRef.
         exists<Token>(token_address),
         error::not_found(ETOKEN_DOES_NOT_EXIST),
     );
-    borrow_global<Token>(token_address)
+    &Token[token_address]
 }
@@ -1316,7 +1605,7 @@ as that would prohibit transactions to be executed in parallel.
public fun name<T: key>(token: Object<T>): String acquires Token, TokenIdentifiers {
     let token_address = object::object_address(&token);
     if (exists<TokenIdentifiers>(token_address)) {
-        aggregator_v2::read_derived_string(&borrow_global<TokenIdentifiers>(token_address).name)
+        aggregator_v2::read_derived_string(&TokenIdentifiers[token_address].name)
     } else {
         borrow(&token).name
     }
@@ -1371,7 +1660,7 @@ as that would prohibit transactions to be executed in parallel.
 
public fun royalty<T: key>(token: Object<T>): Option<Royalty> acquires Token {
     borrow(&token);
     let royalty = royalty::get(token);
-    if (option::is_some(&royalty)) {
+    if (royalty.is_some()) {
         royalty
     } else {
         let creator = creator(token);
@@ -1408,7 +1697,7 @@ as that would prohibit transactions to be executed in parallel.
 
public fun index<T: key>(token: Object<T>): u64 acquires Token, TokenIdentifiers {
     let token_address = object::object_address(&token);
     if (exists<TokenIdentifiers>(token_address)) {
-        aggregator_v2::read_snapshot(&borrow_global<TokenIdentifiers>(token_address).index)
+        aggregator_v2::read_snapshot(&TokenIdentifiers[token_address].index)
     } else {
         borrow(&token).index
     }
@@ -1439,7 +1728,7 @@ as that would prohibit transactions to be executed in parallel.
         exists<Token>(mutator_ref.self),
         error::not_found(ETOKEN_DOES_NOT_EXIST),
     );
-    borrow_global_mut<Token>(mutator_ref.self)
+    &mut Token[mutator_ref.self]
 }
 
@@ -1463,14 +1752,14 @@ as that would prohibit transactions to be executed in parallel.
public fun burn(burn_ref: BurnRef) acquires Token, TokenIdentifiers {
-    let (addr, previous_owner) = if (option::is_some(&burn_ref.inner)) {
-        let delete_ref = option::extract(&mut burn_ref.inner);
+    let (addr, previous_owner) = if (burn_ref.inner.is_some()) {
+        let delete_ref = burn_ref.inner.extract();
         let addr = object::address_from_delete_ref(&delete_ref);
         let previous_owner = object::owner(object::address_to_object<Token>(addr));
         object::delete(delete_ref);
         (addr, previous_owner)
     } else {
-        let addr = option::extract(&mut burn_ref.self);
+        let addr = burn_ref.self.extract();
         let previous_owner = object::owner(object::address_to_object<Token>(addr));
         (addr, previous_owner)
     };
@@ -1523,7 +1812,7 @@ as that would prohibit transactions to be executed in parallel.
 
 
 
public fun set_description(mutator_ref: &MutatorRef, description: String) acquires Token {
-    assert!(string::length(&description) <= MAX_DESCRIPTION_LENGTH, error::out_of_range(EDESCRIPTION_TOO_LONG));
+    assert!(description.length() <= MAX_DESCRIPTION_LENGTH, error::out_of_range(EDESCRIPTION_TOO_LONG));
     let token = borrow_mut(mutator_ref);
     if (std::features::module_event_migration_enabled()) {
         event::emit(Mutation {
@@ -1532,15 +1821,16 @@ as that would prohibit transactions to be executed in parallel.
             old_value: token.description,
             new_value: description
         })
+    } else {
+        event::emit_event(
+            &mut token.mutation_events,
+            MutationEvent {
+                mutated_field_name: string::utf8(b"description"),
+                old_value: token.description,
+                new_value: description
+            },
+        );
     };
-    event::emit_event(
-        &mut token.mutation_events,
-        MutationEvent {
-            mutated_field_name: string::utf8(b"description"),
-            old_value: token.description,
-            new_value: description
-        },
-    );
     token.description = description;
 }
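The setters here now emit either the new module event or the legacy handle-based event, rather than both, depending on `module_event_migration_enabled()`. A self-contained sketch of that pattern with assumed names (`0xcafe::counter_example`, `Bumped`, and `BumpedEvent` are hypothetical):

```move
module 0xcafe::counter_example {
    use std::features;
    use aptos_framework::account;
    use aptos_framework::event::{Self, EventHandle};

    struct Counter has key {
        value: u64,
        bump_events: EventHandle<BumpedEvent>,
    }

    #[event]
    struct Bumped has drop, store { new_value: u64 }

    struct BumpedEvent has drop, store { new_value: u64 }

    public entry fun init(account: &signer) {
        move_to(account, Counter {
            value: 0,
            bump_events: account::new_event_handle<BumpedEvent>(account),
        });
    }

    public entry fun bump(addr: address) acquires Counter {
        let counter = &mut Counter[addr];
        counter.value += 1;
        if (features::module_event_migration_enabled()) {
            // New path: module event, no handle required.
            event::emit(Bumped { new_value: counter.value });
        } else {
            // Legacy path: handle-based event, kept only until migration completes.
            event::emit_event(&mut counter.bump_events, BumpedEvent { new_value: counter.value });
        };
    }
}
```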
 
@@ -1565,12 +1855,12 @@ as that would prohibit transactions to be executed in parallel.
public fun set_name(mutator_ref: &MutatorRef, name: String) acquires Token, TokenIdentifiers {
-    assert!(string::length(&name) <= MAX_TOKEN_NAME_LENGTH, error::out_of_range(ETOKEN_NAME_TOO_LONG));
+    assert!(name.length() <= MAX_TOKEN_NAME_LENGTH, error::out_of_range(ETOKEN_NAME_TOO_LONG));
 
     let token = borrow_mut(mutator_ref);
 
     let old_name = if (exists<TokenIdentifiers>(mutator_ref.self)) {
-        let token_concurrent = borrow_global_mut<TokenIdentifiers>(mutator_ref.self);
+        let token_concurrent = &mut TokenIdentifiers[mutator_ref.self];
         let old_name = aggregator_v2::read_derived_string(&token_concurrent.name);
         token_concurrent.name = aggregator_v2::create_derived_string(name);
         old_name
@@ -1587,15 +1877,16 @@ as that would prohibit transactions to be executed in parallel.
             old_value: old_name,
             new_value: name
         })
+    } else {
+        event::emit_event(
+            &mut token.mutation_events,
+            MutationEvent {
+                mutated_field_name: string::utf8(b"name"),
+                old_value: old_name,
+                new_value: name
+            },
+        );
     };
-    event::emit_event(
-        &mut token.mutation_events,
-        MutationEvent {
-            mutated_field_name: string::utf8(b"name"),
-            old_value: old_name,
-            new_value: name
-        },
-    );
 }
 
@@ -1619,7 +1910,7 @@ as that would prohibit transactions to be executed in parallel.
public fun set_uri(mutator_ref: &MutatorRef, uri: String) acquires Token {
-    assert!(string::length(&uri) <= MAX_URI_LENGTH, error::out_of_range(EURI_TOO_LONG));
+    assert!(uri.length() <= MAX_URI_LENGTH, error::out_of_range(EURI_TOO_LONG));
     let token = borrow_mut(mutator_ref);
     if (std::features::module_event_migration_enabled()) {
         event::emit(Mutation {
@@ -1628,15 +1919,16 @@ as that would prohibit transactions to be executed in parallel.
             old_value: token.uri,
             new_value: uri,
         })
+    } else {
+        event::emit_event(
+            &mut token.mutation_events,
+            MutationEvent {
+                mutated_field_name: string::utf8(b"uri"),
+                old_value: token.uri,
+                new_value: uri,
+            },
+        );
     };
-    event::emit_event(
-        &mut token.mutation_events,
-        MutationEvent {
-            mutated_field_name: string::utf8(b"uri"),
-            old_value: token.uri,
-            new_value: uri,
-        },
-    );
     token.uri = uri;
 }
 
diff --git a/aptos-move/framework/aptos-token-objects/sources/aptos_token.move b/aptos-move/framework/aptos-token-objects/sources/aptos_token.move index 5fe04786cd1a9..b124f9fb7c84d 100644 --- a/aptos-move/framework/aptos-token-objects/sources/aptos_token.move +++ b/aptos-move/framework/aptos-token-objects/sources/aptos_token.move @@ -207,9 +207,9 @@ module aptos_token_objects::aptos_token { let freezable_by_creator = are_collection_tokens_freezable(collection); if (freezable_by_creator) { let aptos_token_addr = object::address_from_constructor_ref(&constructor_ref); - let aptos_token = borrow_global_mut(aptos_token_addr); + let aptos_token = &mut AptosToken[aptos_token_addr]; let transfer_ref = object::generate_transfer_ref(&constructor_ref); - option::fill(&mut aptos_token.transfer_ref, transfer_ref); + aptos_token.transfer_ref.fill(transfer_ref); }; object::object_from_constructor_ref(&constructor_ref) @@ -326,7 +326,7 @@ module aptos_token_objects::aptos_token { exists(token_address), error::not_found(ETOKEN_DOES_NOT_EXIST), ); - borrow_global(token_address) + &AptosToken[token_address] } #[view] @@ -337,7 +337,7 @@ module aptos_token_objects::aptos_token { #[view] public fun is_burnable(token: Object): bool acquires AptosToken { - option::is_some(&borrow(&token).burn_ref) + borrow(&token).burn_ref.is_some() } #[view] @@ -373,13 +373,13 @@ module aptos_token_objects::aptos_token { token::creator(*token) == signer::address_of(creator), error::permission_denied(ENOT_CREATOR), ); - borrow_global(token_address) + &AptosToken[token_address] } public entry fun burn(creator: &signer, token: Object) acquires AptosToken { let aptos_token = authorized_borrow(&token, creator); assert!( - option::is_some(&aptos_token.burn_ref), + aptos_token.burn_ref.is_some(), error::permission_denied(ETOKEN_NOT_BURNABLE), ); move aptos_token; @@ -391,17 +391,17 @@ module aptos_token_objects::aptos_token { property_mutator_ref, } = aptos_token; property_map::burn(property_mutator_ref); - token::burn(option::extract(&mut burn_ref)); + token::burn(burn_ref.extract()); } public entry fun freeze_transfer(creator: &signer, token: Object) acquires AptosCollection, AptosToken { let aptos_token = authorized_borrow(&token, creator); assert!( are_collection_tokens_freezable(token::collection_object(token)) - && option::is_some(&aptos_token.transfer_ref), + && aptos_token.transfer_ref.is_some(), error::permission_denied(EFIELD_NOT_MUTABLE), ); - object::disable_ungated_transfer(option::borrow(&aptos_token.transfer_ref)); + object::disable_ungated_transfer(aptos_token.transfer_ref.borrow()); } public entry fun unfreeze_transfer( @@ -411,10 +411,10 @@ module aptos_token_objects::aptos_token { let aptos_token = authorized_borrow(&token, creator); assert!( are_collection_tokens_freezable(token::collection_object(token)) - && option::is_some(&aptos_token.transfer_ref), + && aptos_token.transfer_ref.is_some(), error::permission_denied(EFIELD_NOT_MUTABLE), ); - object::enable_ungated_transfer(option::borrow(&aptos_token.transfer_ref)); + object::enable_ungated_transfer(aptos_token.transfer_ref.borrow()); } public entry fun set_description( @@ -427,7 +427,7 @@ module aptos_token_objects::aptos_token { error::permission_denied(EFIELD_NOT_MUTABLE), ); let aptos_token = authorized_borrow(&token, creator); - token::set_description(option::borrow(&aptos_token.mutator_ref), description); + token::set_description(aptos_token.mutator_ref.borrow(), description); } public entry fun set_name( @@ -440,7 +440,7 @@ module 
aptos_token_objects::aptos_token { error::permission_denied(EFIELD_NOT_MUTABLE), ); let aptos_token = authorized_borrow(&token, creator); - token::set_name(option::borrow(&aptos_token.mutator_ref), name); + token::set_name(aptos_token.mutator_ref.borrow(), name); } public entry fun set_uri( @@ -453,7 +453,7 @@ module aptos_token_objects::aptos_token { error::permission_denied(EFIELD_NOT_MUTABLE), ); let aptos_token = authorized_borrow(&token, creator); - token::set_uri(option::borrow(&aptos_token.mutator_ref), uri); + token::set_uri(aptos_token.mutator_ref.borrow(), uri); } public entry fun add_property( @@ -545,7 +545,7 @@ module aptos_token_objects::aptos_token { exists(collection_address), error::not_found(ECOLLECTION_DOES_NOT_EXIST), ); - borrow_global(collection_address) + &AptosCollection[collection_address] } public fun is_mutable_collection_description( @@ -557,7 +557,7 @@ module aptos_token_objects::aptos_token { public fun is_mutable_collection_royalty( collection: Object, ): bool acquires AptosCollection { - option::is_some(&borrow_collection(&collection).royalty_mutator_ref) + borrow_collection(&collection).royalty_mutator_ref.is_some() } public fun is_mutable_collection_uri( @@ -614,7 +614,7 @@ module aptos_token_objects::aptos_token { collection::creator(*collection) == signer::address_of(creator), error::permission_denied(ENOT_CREATOR), ); - borrow_global(collection_address) + &AptosCollection[collection_address] } public entry fun set_collection_description( @@ -627,7 +627,7 @@ module aptos_token_objects::aptos_token { aptos_collection.mutable_description, error::permission_denied(EFIELD_NOT_MUTABLE), ); - collection::set_description(option::borrow(&aptos_collection.mutator_ref), description); + collection::set_description(aptos_collection.mutator_ref.borrow(), description); } public fun set_collection_royalties( @@ -637,10 +637,10 @@ module aptos_token_objects::aptos_token { ) acquires AptosCollection { let aptos_collection = authorized_borrow_collection(&collection, creator); assert!( - option::is_some(&aptos_collection.royalty_mutator_ref), + aptos_collection.royalty_mutator_ref.is_some(), error::permission_denied(EFIELD_NOT_MUTABLE), ); - royalty::update(option::borrow(&aptos_collection.royalty_mutator_ref), royalty); + royalty::update(aptos_collection.royalty_mutator_ref.borrow(), royalty); } entry fun set_collection_royalties_call( @@ -664,7 +664,7 @@ module aptos_token_objects::aptos_token { aptos_collection.mutable_uri, error::permission_denied(EFIELD_NOT_MUTABLE), ); - collection::set_uri(option::borrow(&aptos_collection.mutator_ref), uri); + collection::set_uri(aptos_collection.mutator_ref.borrow(), uri); } // Tests @@ -1058,9 +1058,9 @@ module aptos_token_objects::aptos_token { let collection = create_collection_helper(creator, collection_name, true); let token = mint_helper(creator, collection_name, token_name); - let royalty_before = option::extract(&mut token::royalty(token)); + let royalty_before = token::royalty(token).extract(); set_collection_royalties_call(creator, collection, 2, 3, @0x444); - let royalty_after = option::extract(&mut token::royalty(token)); + let royalty_after = token::royalty(token).extract(); assert!(royalty_before != royalty_after, 0); } diff --git a/aptos-move/framework/aptos-token-objects/sources/collection.move b/aptos-move/framework/aptos-token-objects/sources/collection.move index b3adbf6f3bc6a..821ceabf53ce3 100644 --- a/aptos-move/framework/aptos-token-objects/sources/collection.move +++ 
b/aptos-move/framework/aptos-token-objects/sources/collection.move @@ -18,6 +18,7 @@ /// * Add aggregator support when added to framework module aptos_token_objects::collection { use std::error; + use std::features; use std::option::{Self, Option}; use std::signer; use std::string::{Self, String}; @@ -49,6 +50,8 @@ module aptos_token_objects::collection { const EINVALID_MAX_SUPPLY: u64 = 9; /// The collection does not have a max supply const ENO_MAX_SUPPLY_IN_COLLECTION: u64 = 10; + /// The collection owner feature is not supported + const ECOLLECTION_OWNER_NOT_SUPPORTED: u64 = 11; const MAX_COLLECTION_NAME_LENGTH: u64 = 128; const MAX_URI_LENGTH: u64 = 512; @@ -210,6 +213,31 @@ module aptos_token_objects::collection { ) } + /// Same functionality as `create_fixed_collection`, but the caller is the owner of the collection. + /// This means that the caller can transfer the collection to another address. + /// This transfers ownership and minting permissions to the new address. + public fun create_fixed_collection_as_owner( + creator: &signer, + description: String, + max_supply: u64, + name: String, + royalty: Option, + uri: String, + ): ConstructorRef { + assert!(features::is_collection_owner_enabled(), error::unavailable(ECOLLECTION_OWNER_NOT_SUPPORTED)); + + let constructor_ref = create_fixed_collection( + creator, + description, + max_supply, + name, + royalty, + uri, + ); + enable_ungated_transfer(&constructor_ref); + constructor_ref + } + /// Creates an unlimited collection. This has support for supply tracking but does not limit /// the supply of tokens. public fun create_unlimited_collection( @@ -238,6 +266,29 @@ module aptos_token_objects::collection { ) } + /// Same functionality as `create_unlimited_collection`, but the caller is the owner of the collection. + /// This means that the caller can transfer the collection to another address. + /// This transfers ownership and minting permissions to the new address. + public fun create_unlimited_collection_as_owner( + creator: &signer, + description: String, + name: String, + royalty: Option, + uri: String, + ): ConstructorRef { + assert!(features::is_collection_owner_enabled(), error::unavailable(ECOLLECTION_OWNER_NOT_SUPPORTED)); + + let constructor_ref = create_unlimited_collection( + creator, + description, + name, + royalty, + uri, + ); + enable_ungated_transfer(&constructor_ref); + constructor_ref + } + /// Creates an untracked collection, or a collection that supports an arbitrary amount of /// tokens. This is useful for mass airdrops that fully leverage Aptos parallelization. 
/// TODO: Hide this until we bring back meaningful way to enforce burns @@ -271,9 +322,9 @@ module aptos_token_objects::collection { uri: String, supply: Option, ): ConstructorRef { - assert!(string::length(&name) <= MAX_COLLECTION_NAME_LENGTH, error::out_of_range(ECOLLECTION_NAME_TOO_LONG)); - assert!(string::length(&uri) <= MAX_URI_LENGTH, error::out_of_range(EURI_TOO_LONG)); - assert!(string::length(&description) <= MAX_DESCRIPTION_LENGTH, error::out_of_range(EDESCRIPTION_TOO_LONG)); + assert!(name.length() <= MAX_COLLECTION_NAME_LENGTH, error::out_of_range(ECOLLECTION_NAME_TOO_LONG)); + assert!(uri.length() <= MAX_URI_LENGTH, error::out_of_range(EURI_TOO_LONG)); + assert!(description.length() <= MAX_DESCRIPTION_LENGTH, error::out_of_range(EDESCRIPTION_TOO_LONG)); let object_signer = object::generate_signer(&constructor_ref); @@ -286,14 +337,14 @@ module aptos_token_objects::collection { }; move_to(&object_signer, collection); - if (option::is_some(&supply)) { - move_to(&object_signer, option::destroy_some(supply)) + if (supply.is_some()) { + move_to(&object_signer, supply.destroy_some()) } else { - option::destroy_none(supply) + supply.destroy_none() }; - if (option::is_some(&royalty)) { - royalty::init(&constructor_ref, option::extract(&mut royalty)) + if (royalty.is_some()) { + royalty::init(&constructor_ref, royalty.extract()) }; let transfer_ref = object::generate_transfer_ref(&constructor_ref); @@ -302,6 +353,11 @@ module aptos_token_objects::collection { constructor_ref } + inline fun enable_ungated_transfer(constructor_ref: &ConstructorRef) { + let transfer_ref = object::generate_transfer_ref(constructor_ref); + object::enable_ungated_transfer(&transfer_ref); + } + /// Generates the collections address based upon the creators address and the collection's name public fun create_collection_address(creator: &address, name: &String): address { object::create_object_address(creator, create_collection_seed(name)) @@ -309,18 +365,18 @@ module aptos_token_objects::collection { /// Named objects are derived from a seed, the collection's seed is its name. public fun create_collection_seed(name: &String): vector { - assert!(string::length(name) <= MAX_COLLECTION_NAME_LENGTH, error::out_of_range(ECOLLECTION_NAME_TOO_LONG)); - *string::bytes(name) + assert!(name.length() <= MAX_COLLECTION_NAME_LENGTH, error::out_of_range(ECOLLECTION_NAME_TOO_LONG)); + *name.bytes() } /// Called by token on mint to increment supply if there's an appropriate Supply struct. 
- public(friend) fun increment_supply( + friend fun increment_supply( collection: &Object, token: address, ): Option> acquires FixedSupply, UnlimitedSupply, ConcurrentSupply { let collection_addr = object::object_address(collection); if (exists(collection_addr)) { - let supply = borrow_global_mut(collection_addr); + let supply = &mut ConcurrentSupply[collection_addr]; assert!( aggregator_v2::try_add(&mut supply.current_supply, 1), error::out_of_range(ECOLLECTION_SUPPLY_EXCEEDED), @@ -335,9 +391,9 @@ module aptos_token_objects::collection { ); option::some(aggregator_v2::snapshot(&supply.total_minted)) } else if (exists(collection_addr)) { - let supply = borrow_global_mut(collection_addr); - supply.current_supply = supply.current_supply + 1; - supply.total_minted = supply.total_minted + 1; + let supply = &mut FixedSupply[collection_addr]; + supply.current_supply += 1; + supply.total_minted += 1; assert!( supply.current_supply <= supply.max_supply, error::out_of_range(ECOLLECTION_SUPPLY_EXCEEDED), @@ -350,18 +406,19 @@ module aptos_token_objects::collection { token, }, ); + } else { + event::emit_event(&mut supply.mint_events, + MintEvent { + index: supply.total_minted, + token, + }, + ); }; - event::emit_event(&mut supply.mint_events, - MintEvent { - index: supply.total_minted, - token, - }, - ); option::some(aggregator_v2::create_snapshot(supply.total_minted)) } else if (exists(collection_addr)) { - let supply = borrow_global_mut(collection_addr); - supply.current_supply = supply.current_supply + 1; - supply.total_minted = supply.total_minted + 1; + let supply = &mut UnlimitedSupply[collection_addr]; + supply.current_supply += 1; + supply.total_minted += 1; if (std::features::module_event_migration_enabled()) { event::emit( Mint { @@ -370,22 +427,44 @@ module aptos_token_objects::collection { token, }, ); + } else { + event::emit_event( + &mut supply.mint_events, + MintEvent { + index: supply.total_minted, + token, + }, + ); }; - event::emit_event( - &mut supply.mint_events, - MintEvent { - index: supply.total_minted, - token, - }, - ); option::some(aggregator_v2::create_snapshot(supply.total_minted)) } else { option::none() } } + spec increment_supply { + pragma aborts_if_is_partial; + let collection_addr = object::object_address(collection); + let supply = global(collection_addr); + let post supply_post = global(collection_addr); + aborts_if exists(collection_addr) && + aggregator_v2::spec_get_value(supply.current_supply) + 1 + > aggregator_v2::spec_get_max_value(supply.current_supply); + aborts_if exists(collection_addr) && + aggregator_v2::spec_get_value(supply.total_minted) + 1 + > aggregator_v2::spec_get_max_value(supply.total_minted); + ensures + aggregator_v2::spec_get_max_value(supply.current_supply) + == aggregator_v2::spec_get_max_value(supply_post.current_supply); + ensures exists(collection_addr) && + aggregator_v2::spec_get_value(supply.current_supply) + 1 + <= aggregator_v2::spec_get_max_value(supply.current_supply) ==> + aggregator_v2::spec_get_value(supply.current_supply) + 1 + == aggregator_v2::spec_get_value(supply_post.current_supply); + } + /// Called by token on burn to decrement supply if there's an appropriate Supply struct. 
- public(friend) fun decrement_supply( + friend fun decrement_supply( collection: &Object, token: address, index: Option, @@ -393,57 +472,59 @@ module aptos_token_objects::collection { ) acquires FixedSupply, UnlimitedSupply, ConcurrentSupply { let collection_addr = object::object_address(collection); if (exists(collection_addr)) { - let supply = borrow_global_mut(collection_addr); + let supply = &mut ConcurrentSupply[collection_addr]; aggregator_v2::sub(&mut supply.current_supply, 1); event::emit( Burn { collection: collection_addr, - index: *option::borrow(&index), + index: *index.borrow(), token, previous_owner, }, ); } else if (exists(collection_addr)) { - let supply = borrow_global_mut(collection_addr); - supply.current_supply = supply.current_supply - 1; + let supply = &mut FixedSupply[collection_addr]; + supply.current_supply -= 1; if (std::features::module_event_migration_enabled()) { event::emit( Burn { collection: collection_addr, - index: *option::borrow(&index), + index: *index.borrow(), token, previous_owner, }, ); + } else { + event::emit_event( + &mut supply.burn_events, + BurnEvent { + index: *index.borrow(), + token, + }, + ); }; - event::emit_event( - &mut supply.burn_events, - BurnEvent { - index: *option::borrow(&index), - token, - }, - ); } else if (exists(collection_addr)) { - let supply = borrow_global_mut(collection_addr); - supply.current_supply = supply.current_supply - 1; + let supply = &mut UnlimitedSupply[collection_addr]; + supply.current_supply -= 1; if (std::features::module_event_migration_enabled()) { event::emit( Burn { collection: collection_addr, - index: *option::borrow(&index), + index: *index.borrow(), token, previous_owner, }, ); + } else { + event::emit_event( + &mut supply.burn_events, + BurnEvent { + index: *index.borrow(), + token, + }, + ); }; - event::emit_event( - &mut supply.burn_events, - BurnEvent { - index: *option::borrow(&index), - token, - }, - ); } } @@ -514,7 +595,7 @@ module aptos_token_objects::collection { inline fun borrow(collection: &Object): &Collection { let collection_address = object::object_address(collection); check_collection_exists(collection_address); - borrow_global(collection_address) + &Collection[collection_address] } #[view] @@ -529,13 +610,13 @@ module aptos_token_objects::collection { check_collection_exists(collection_address); if (exists(collection_address)) { - let supply = borrow_global_mut(collection_address); + let supply = &ConcurrentSupply[collection_address]; option::some(aggregator_v2::read(&supply.current_supply)) } else if (exists(collection_address)) { - let supply = borrow_global_mut(collection_address); + let supply = &FixedSupply[collection_address]; option::some(supply.current_supply) } else if (exists(collection_address)) { - let supply = borrow_global_mut(collection_address); + let supply = &UnlimitedSupply[collection_address]; option::some(supply.current_supply) } else { option::none() @@ -566,7 +647,7 @@ module aptos_token_objects::collection { inline fun borrow_mut(mutator_ref: &MutatorRef): &mut Collection { check_collection_exists(mutator_ref.self); - borrow_global_mut(mutator_ref.self) + &mut Collection[mutator_ref.self] } /// Callers of this function must be aware that changing the name will change the calculated @@ -576,7 +657,7 @@ module aptos_token_objects::collection { /// /// After changing the collection's name, to create tokens - only call functions that accept the collection object as an argument. 
public fun set_name(mutator_ref: &MutatorRef, name: String) acquires Collection { - assert!(string::length(&name) <= MAX_COLLECTION_NAME_LENGTH, error::out_of_range(ECOLLECTION_NAME_TOO_LONG)); + assert!(name.length() <= MAX_COLLECTION_NAME_LENGTH, error::out_of_range(ECOLLECTION_NAME_TOO_LONG)); let collection = borrow_mut(mutator_ref); event::emit(Mutation { mutated_field_name: string::utf8(b"name") , @@ -588,7 +669,7 @@ module aptos_token_objects::collection { } public fun set_description(mutator_ref: &MutatorRef, description: String) acquires Collection { - assert!(string::length(&description) <= MAX_DESCRIPTION_LENGTH, error::out_of_range(EDESCRIPTION_TOO_LONG)); + assert!(description.length() <= MAX_DESCRIPTION_LENGTH, error::out_of_range(EDESCRIPTION_TOO_LONG)); let collection = borrow_mut(mutator_ref); if (std::features::module_event_migration_enabled()) { event::emit(Mutation { @@ -597,16 +678,17 @@ module aptos_token_objects::collection { old_value: collection.description, new_value: description, }); + } else { + event::emit_event( + &mut collection.mutation_events, + MutationEvent { mutated_field_name: string::utf8(b"description") }, + ); }; collection.description = description; - event::emit_event( - &mut collection.mutation_events, - MutationEvent { mutated_field_name: string::utf8(b"description") }, - ); } public fun set_uri(mutator_ref: &MutatorRef, uri: String) acquires Collection { - assert!(string::length(&uri) <= MAX_URI_LENGTH, error::out_of_range(EURI_TOO_LONG)); + assert!(uri.length() <= MAX_URI_LENGTH, error::out_of_range(EURI_TOO_LONG)); let collection = borrow_mut(mutator_ref); if (std::features::module_event_migration_enabled()) { event::emit(Mutation { @@ -615,12 +697,13 @@ module aptos_token_objects::collection { old_value: collection.uri, new_value: uri, }); + } else { + event::emit_event( + &mut collection.mutation_events, + MutationEvent { mutated_field_name: string::utf8(b"uri") }, + ); }; collection.uri = uri; - event::emit_event( - &mut collection.mutation_events, - MutationEvent { mutated_field_name: string::utf8(b"uri") }, - ); } public fun set_max_supply(mutator_ref: &MutatorRef, max_supply: u64) acquires ConcurrentSupply, FixedSupply { @@ -629,7 +712,7 @@ module aptos_token_objects::collection { let old_max_supply; if (exists(collection_address)) { - let supply = borrow_global_mut(collection_address); + let supply = &mut ConcurrentSupply[collection_address]; let current_supply = aggregator_v2::read(&supply.current_supply); assert!( max_supply >= current_supply, @@ -639,7 +722,7 @@ module aptos_token_objects::collection { supply.current_supply = aggregator_v2::create_aggregator(max_supply); aggregator_v2::add(&mut supply.current_supply, current_supply); } else if (exists(collection_address)) { - let supply = borrow_global_mut(collection_address); + let supply = &mut FixedSupply[collection_address]; assert!( max_supply >= supply.current_supply, error::out_of_range(EINVALID_MAX_SUPPLY), @@ -695,12 +778,12 @@ module aptos_token_objects::collection { let collection_address = create_collection_address(&creator_address, &name); let collection = object::address_to_object(collection_address); assert!(count(collection) == option::some(0), 0); - let cid = aggregator_v2::read_snapshot(&option::destroy_some(increment_supply(&collection, creator_address))); + let cid = aggregator_v2::read_snapshot(&increment_supply(&collection, creator_address).destroy_some()); assert!(count(collection) == option::some(1), 0); - 
assert!(event::counter(&borrow_global(collection_address).mint_events) == 1, 0); + assert!(event::emitted_events().length() == 1, 0); decrement_supply(&collection, creator_address, option::some(cid), creator_address); assert!(count(collection) == option::some(0), 0); - assert!(event::counter(&borrow_global(collection_address).burn_events) == 1, 0); + assert!(event::emitted_events().length() == 1, 0); } #[test(creator = @0x123)] @@ -713,12 +796,12 @@ module aptos_token_objects::collection { let collection_address = create_collection_address(&creator_address, &name); let collection = object::address_to_object(collection_address); assert!(count(collection) == option::some(0), 0); - let cid = aggregator_v2::read_snapshot(&option::destroy_some(increment_supply(&collection, creator_address))); + let cid = aggregator_v2::read_snapshot(&increment_supply(&collection, creator_address).destroy_some()); assert!(count(collection) == option::some(1), 0); - assert!(event::counter(&borrow_global(collection_address).mint_events) == 1, 0); + assert!(event::emitted_events().length() == 1, 0); decrement_supply(&collection, creator_address, option::some(cid), creator_address); assert!(count(collection) == option::some(0), 0); - assert!(event::counter(&borrow_global(collection_address).burn_events) == 1, 0); + assert!(event::emitted_events().length() == 1, 0); } #[test(creator = @0x123)] @@ -753,6 +836,7 @@ module aptos_token_objects::collection { #[expected_failure(abort_code = 0x50003, location = aptos_framework::object)] entry fun test_create_and_transfer(creator: &signer, trader: &signer) { let creator_address = signer::address_of(creator); + let trader_address = signer::address_of(trader); let collection_name = string::utf8(b"collection name"); create_collection_helper(creator, collection_name); @@ -760,7 +844,24 @@ module aptos_token_objects::collection { create_collection_address(&creator_address, &collection_name), ); assert!(object::owner(collection) == creator_address, 1); - object::transfer(creator, collection, signer::address_of(trader)); + object::transfer(creator, collection, trader_address); + } + + #[test(creator = @0x123, trader = @0x456, aptos_framework = @aptos_framework)] + entry fun test_create_and_transfer_as_owner(creator: &signer, trader: &signer, aptos_framework: &signer) { + features::change_feature_flags_for_testing(aptos_framework, vector[features::get_collection_owner_feature()], vector[]); + let creator_address = signer::address_of(creator); + let trader_address = signer::address_of(trader); + let collection_name = string::utf8(b"collection name"); + create_unlimited_collection_as_owner_helper(creator, collection_name); + + let collection = object::address_to_object( + create_collection_address(&creator_address, &collection_name), + ); + assert!(object::owner(collection) == creator_address, 1); + // Transferring owned collections are allowed + object::transfer(creator, collection, trader_address); + assert!(object::owner(collection) == trader_address, 1); } #[test(creator = @0x123)] @@ -830,7 +931,7 @@ module aptos_token_objects::collection { set_max_supply(&mutator_ref, new_max_supply); let collection_address = create_collection_address(&signer::address_of(creator), &collection_name); - let supply = borrow_global(collection_address); + let supply = &ConcurrentSupply[collection_address]; assert!(aggregator_v2::max_value(&supply.current_supply) == new_max_supply, 0); event::was_event_emitted(&SetMaxSupply { @@ -851,17 +952,15 @@ module aptos_token_objects::collection { let 
token_signer = create_token(creator); let current_supply = 5; - let i = 0; - while (i < current_supply) { + for (i in 0..current_supply) { increment_supply(&collection, signer::address_of(&token_signer)); - i = i + 1; }; let mutator_ref = generate_mutator_ref(&constructor_ref); set_max_supply(&mutator_ref, current_supply); let collection_address = create_collection_address(&signer::address_of(creator), &collection_name); - let supply = borrow_global(collection_address); + let supply = &ConcurrentSupply[collection_address]; assert!(aggregator_v2::max_value(&supply.current_supply) == current_supply, EINVALID_MAX_SUPPLY); event::was_event_emitted(&SetMaxSupply { @@ -891,10 +990,8 @@ module aptos_token_objects::collection { let collection = object::object_from_constructor_ref(&constructor_ref); let token_signer = create_token(creator); - let i = 0; - while (i < max_supply) { + for (i in 0..max_supply) { increment_supply(&collection, signer::address_of(&token_signer)); - i = i + 1; }; let mutator_ref = generate_mutator_ref(&constructor_ref); @@ -911,10 +1008,8 @@ module aptos_token_objects::collection { let collection = object::object_from_constructor_ref(&constructor_ref); let token_signer = create_token(creator); - let i = 0; - while (i < max_supply) { + for (i in 0..max_supply) { increment_supply(&collection, signer::address_of(&token_signer)); - i = i + 1; }; let mutator_ref = generate_mutator_ref(&constructor_ref); @@ -945,6 +1040,17 @@ module aptos_token_objects::collection { ) } + #[test_only] + fun create_unlimited_collection_as_owner_helper(creator: &signer, name: String): ConstructorRef { + create_unlimited_collection_as_owner( + creator, + string::utf8(b"description"), + name, + option::none(), + string::utf8(b"uri"), + ) + } + #[test_only] /// Create a token as we cannot create a dependency cycle between collection and token modules. fun create_token(creator: &signer): signer { diff --git a/aptos-move/framework/aptos-token-objects/sources/property_map.move b/aptos-move/framework/aptos-token-objects/sources/property_map.move index 53d983060064e..80a281df13994 100644 --- a/aptos-move/framework/aptos-token-objects/sources/property_map.move +++ b/aptos-move/framework/aptos-token-objects/sources/property_map.move @@ -3,7 +3,6 @@ /// represent types and storing values in bcs format. 
module aptos_token_objects::property_map { use std::bcs; - use std::vector; use std::error; use std::string::{Self, String}; use aptos_std::from_bcs; @@ -87,26 +86,26 @@ module aptos_token_objects::property_map { types: vector, values: vector>, ): PropertyMap { - let length = vector::length(&keys); + let length = keys.length(); assert!(length <= MAX_PROPERTY_MAP_SIZE, error::invalid_argument(ETOO_MANY_PROPERTIES)); - assert!(length == vector::length(&values), error::invalid_argument(EKEY_VALUE_COUNT_MISMATCH)); - assert!(length == vector::length(&types), error::invalid_argument(EKEY_TYPE_COUNT_MISMATCH)); + assert!(length == values.length(), error::invalid_argument(EKEY_VALUE_COUNT_MISMATCH)); + assert!(length == types.length(), error::invalid_argument(EKEY_TYPE_COUNT_MISMATCH)); let container = simple_map::create(); - while (!vector::is_empty(&keys)) { - let key = vector::pop_back(&mut keys); + while (!keys.is_empty()) { + let key = keys.pop_back(); assert!( - string::length(&key) <= MAX_PROPERTY_NAME_LENGTH, + key.length() <= MAX_PROPERTY_NAME_LENGTH, error::invalid_argument(EPROPERTY_MAP_KEY_TOO_LONG), ); - let value = vector::pop_back(&mut values); - let type = vector::pop_back(&mut types); + let value = values.pop_back(); + let type = types.pop_back(); let new_type = to_internal_type(type); validate_type(new_type, value); - simple_map::add(&mut container, key, PropertyValue { value, type: new_type }); + container.add(key, PropertyValue { value, type: new_type }); }; PropertyMap { inner: container } @@ -207,14 +206,14 @@ module aptos_token_objects::property_map { public fun contains_key(object: &Object, key: &String): bool acquires PropertyMap { assert_exists(object::object_address(object)); - let property_map = borrow_global(object::object_address(object)); - simple_map::contains_key(&property_map.inner, key) + let property_map = &PropertyMap[object::object_address(object)]; + property_map.inner.contains_key(key) } public fun length(object: &Object): u64 acquires PropertyMap { assert_exists(object::object_address(object)); - let property_map = borrow_global(object::object_address(object)); - simple_map::length(&property_map.inner) + let property_map = &PropertyMap[object::object_address(object)]; + property_map.inner.length() } /// Read the property and get it's external type in it's bcs encoded format @@ -222,8 +221,8 @@ module aptos_token_objects::property_map { /// The preferred method is to use `read_` where the type is already known. 
public fun read(object: &Object, key: &String): (String, vector) acquires PropertyMap { assert_exists(object::object_address(object)); - let property_map = borrow_global(object::object_address(object)); - let property_value = simple_map::borrow(&property_map.inner, key); + let property_map = &PropertyMap[object::object_address(object)]; + let property_value = property_map.inner.borrow(key); let new_type = to_external_type(property_value.type); (new_type, property_value.value) } @@ -311,8 +310,8 @@ module aptos_token_objects::property_map { inline fun add_internal(ref: &MutatorRef, key: String, type: u8, value: vector) acquires PropertyMap { assert_exists(ref.self); - let property_map = borrow_global_mut(ref.self); - simple_map::add(&mut property_map.inner, key, PropertyValue { type, value }); + let property_map = &mut PropertyMap[ref.self]; + property_map.inner.add(key, PropertyValue { type, value }); } /// Updates a property in place already bcs encoded @@ -330,16 +329,16 @@ module aptos_token_objects::property_map { inline fun update_internal(ref: &MutatorRef, key: &String, type: u8, value: vector) acquires PropertyMap { assert_exists(ref.self); - let property_map = borrow_global_mut(ref.self); - let old_value = simple_map::borrow_mut(&mut property_map.inner, key); + let property_map = &mut PropertyMap[ref.self]; + let old_value = property_map.inner.borrow_mut(key); *old_value = PropertyValue { type, value }; } /// Removes a property from the map, ensuring that it does in fact exist public fun remove(ref: &MutatorRef, key: &String) acquires PropertyMap { assert_exists(ref.self); - let property_map = borrow_global_mut(ref.self); - simple_map::remove(&mut property_map.inner, key); + let property_map = &mut PropertyMap[ref.self]; + property_map.inner.remove(key); } // Tests diff --git a/aptos-move/framework/aptos-token-objects/sources/royalty.move b/aptos-move/framework/aptos-token-objects/sources/royalty.move index e3068b68ad4ff..19c0f34afe514 100644 --- a/aptos-move/framework/aptos-token-objects/sources/royalty.move +++ b/aptos-move/framework/aptos-token-objects/sources/royalty.move @@ -66,7 +66,7 @@ module aptos_token_objects::royalty { exists(addr) } - public(friend) fun delete(addr: address) acquires Royalty { + friend fun delete(addr: address) acquires Royalty { assert!(exists(addr), error::not_found(EROYALTY_DOES_NOT_EXIST)); move_from(addr); } @@ -75,7 +75,7 @@ module aptos_token_objects::royalty { public fun get(maybe_royalty: Object): Option acquires Royalty { let obj_addr = object::object_address(&maybe_royalty); if (exists(obj_addr)) { - option::some(*borrow_global(obj_addr)) + option::some(Royalty[obj_addr]) } else { option::none() } diff --git a/aptos-move/framework/aptos-token-objects/sources/token.move b/aptos-move/framework/aptos-token-objects/sources/token.move index 0d944c99c5cfc..0c4dc68141a70 100644 --- a/aptos-move/framework/aptos-token-objects/sources/token.move +++ b/aptos-move/framework/aptos-token-objects/sources/token.move @@ -6,10 +6,10 @@ /// module aptos_token_objects::token { use std::error; + use std::features; use std::option::{Self, Option}; use std::string::{Self, String}; use std::signer; - use std::vector; use aptos_framework::aggregator_v2::{Self, AggregatorSnapshot, DerivedStringSnapshot}; use aptos_framework::event; use aptos_framework::object::{Self, ConstructorRef, Object}; @@ -33,6 +33,10 @@ module aptos_token_objects::token { const EDESCRIPTION_TOO_LONG: u64 = 6; /// The seed is over the maximum length const ESEED_TOO_LONG: u64 = 7; + /// The 
calling signer is not the owner + const ENOT_OWNER: u64 = 8; + /// The collection owner feature is not supported + const ECOLLECTION_OWNER_NOT_SUPPORTED: u64 = 9; const MAX_TOKEN_NAME_LENGTH: u64 = 128; const MAX_TOKEN_SEED_LENGTH: u64 = 128; @@ -156,30 +160,75 @@ module aptos_token_objects::token { ) { assert!(collection::creator(collection) == signer::address_of(creator), error::unauthenticated(ENOT_CREATOR)); - if (option::is_some(&name_with_index_suffix)) { + create_common_with_collection_internal( + constructor_ref, + collection, + description, + name_prefix, + name_with_index_suffix, + royalty, + uri + ); + } + + inline fun create_common_with_collection_as_owner( + owner: &signer, + constructor_ref: &ConstructorRef, + collection: Object, + description: String, + name_prefix: String, + // If option::some, numbered token is created - i.e. index is appended to the name. + // If option::none, name_prefix is the full name of the token. + name_with_index_suffix: Option, + royalty: Option, + uri: String, + ) { + assert!(features::is_collection_owner_enabled(), error::unavailable(ECOLLECTION_OWNER_NOT_SUPPORTED)); + assert!(object::owner(collection) == signer::address_of(owner), error::unauthenticated(ENOT_OWNER)); + + create_common_with_collection_internal( + constructor_ref, + collection, + description, + name_prefix, + name_with_index_suffix, + royalty, + uri + ); + } + + inline fun create_common_with_collection_internal( + constructor_ref: &ConstructorRef, + collection: Object, + description: String, + name_prefix: String, + // If option::some, numbered token is created - i.e. index is appended to the name. + // If option::none, name_prefix is the full name of the token. + name_with_index_suffix: Option, + royalty: Option, + uri: String, + ) { + if (name_with_index_suffix.is_some()) { // Be conservative, as we don't know what length the index will be, and assume worst case (20 chars in MAX_U64) assert!( - string::length(&name_prefix) + 20 + string::length( - option::borrow(&name_with_index_suffix) - ) <= MAX_TOKEN_NAME_LENGTH, + name_prefix.length() + 20 + name_with_index_suffix.borrow().length() <= MAX_TOKEN_NAME_LENGTH, error::out_of_range(ETOKEN_NAME_TOO_LONG) ); } else { - assert!(string::length(&name_prefix) <= MAX_TOKEN_NAME_LENGTH, error::out_of_range(ETOKEN_NAME_TOO_LONG)); + assert!(name_prefix.length() <= MAX_TOKEN_NAME_LENGTH, error::out_of_range(ETOKEN_NAME_TOO_LONG)); }; - assert!(string::length(&description) <= MAX_DESCRIPTION_LENGTH, error::out_of_range(EDESCRIPTION_TOO_LONG)); - assert!(string::length(&uri) <= MAX_URI_LENGTH, error::out_of_range(EURI_TOO_LONG)); + assert!(description.length() <= MAX_DESCRIPTION_LENGTH, error::out_of_range(EDESCRIPTION_TOO_LONG)); + assert!(uri.length() <= MAX_URI_LENGTH, error::out_of_range(EURI_TOO_LONG)); let object_signer = object::generate_signer(constructor_ref); - let index = option::destroy_with_default( - collection::increment_supply(&collection, signer::address_of(&object_signer)), + let index = collection::increment_supply(&collection, signer::address_of(&object_signer)).destroy_with_default( aggregator_v2::create_snapshot(0) ); // If create_numbered_token called us, add index to the name. 
- let name = if (option::is_some(&name_with_index_suffix)) { - aggregator_v2::derive_string_concat(name_prefix, &index, option::extract(&mut name_with_index_suffix)) + let name = if (name_with_index_suffix.is_some()) { + aggregator_v2::derive_string_concat(name_prefix, &index, name_with_index_suffix.extract()) } else { aggregator_v2::create_derived_string(name_prefix) }; @@ -203,8 +252,8 @@ module aptos_token_objects::token { }; move_to(&object_signer, token); - if (option::is_some(&royalty)) { - royalty::init(constructor_ref, option::extract(&mut royalty)) + if (royalty.is_some()) { + royalty::init(constructor_ref, royalty.extract()) }; } @@ -260,6 +309,30 @@ module aptos_token_objects::token { constructor_ref } + /// Same functionality as `create_token`, but the token can only be created by the collection owner. + public fun create_token_as_collection_owner( + creator: &signer, + collection: Object, + description: String, + name: String, + royalty: Option, + uri: String, + ): ConstructorRef { + let creator_address = signer::address_of(creator); + let constructor_ref = object::create_object(creator_address); + create_common_with_collection_as_owner( + creator, + &constructor_ref, + collection, + description, + name, + option::none(), + royalty, + uri + ); + constructor_ref + } + /// Creates a new token object with a unique address and returns the ConstructorRef /// for additional specialization. /// The name is created by concatenating the (name_prefix, index, name_suffix). @@ -321,6 +394,31 @@ module aptos_token_objects::token { constructor_ref } + /// Same functionality as `create_numbered_token_object`, but the token can only be created by the collection owner. + public fun create_numbered_token_as_collection_owner( + creator: &signer, + collection: Object, + description: String, + name_with_index_prefix: String, + name_with_index_suffix: String, + royalty: Option, + uri: String, + ): ConstructorRef { + let creator_address = signer::address_of(creator); + let constructor_ref = object::create_object(creator_address); + create_common_with_collection_as_owner( + creator, + &constructor_ref, + collection, + description, + name_with_index_prefix, + option::some(name_with_index_suffix), + royalty, + uri + ); + constructor_ref + } + /// Creates a new token object from a token name and returns the ConstructorRef for /// additional specialization. /// This function must be called if the collection name has been previously changed. @@ -373,6 +471,30 @@ module aptos_token_objects::token { constructor_ref } + /// Same functionality as `create_named_token_object`, but the token can only be created by the collection owner. + public fun create_named_token_as_collection_owner( + creator: &signer, + collection: Object, + description: String, + name: String, + royalty: Option, + uri: String, + ): ConstructorRef { + let seed = create_token_seed(&collection::name(collection), &name); + let constructor_ref = object::create_named_object(creator, seed); + create_common_with_collection_as_owner( + creator, + &constructor_ref, + collection, + description, + name, + option::none(), + royalty, + uri + ); + constructor_ref + } + /// Creates a new token object from a token name and seed. /// Returns the ConstructorRef for additional specialization. /// This function must be called if the collection name has been previously changed. 
@@ -391,6 +513,31 @@ module aptos_token_objects::token { constructor_ref } + /// Same functionality as `create_named_token_from_seed`, but the token can only be created by the collection owner. + public fun create_named_token_from_seed_as_collection_owner( + creator: &signer, + collection: Object, + description: String, + name: String, + seed: String, + royalty: Option, + uri: String, + ): ConstructorRef { + let seed = create_token_name_with_seed(&collection::name(collection), &name, &seed); + let constructor_ref = object::create_named_object(creator, seed); + create_common_with_collection_as_owner( + creator, + &constructor_ref, + collection, + description, + name, + option::none(), + royalty, + uri + ); + constructor_ref + } + #[deprecated] /// DEPRECATED: Use `create` instead for identical behavior. /// @@ -432,17 +579,17 @@ module aptos_token_objects::token { /// Named objects are derived from a seed, the token's seed is its name appended to the collection's name. public fun create_token_seed(collection: &String, name: &String): vector { - assert!(string::length(name) <= MAX_TOKEN_NAME_LENGTH, error::out_of_range(ETOKEN_NAME_TOO_LONG)); - let seed = *string::bytes(collection); - vector::append(&mut seed, b"::"); - vector::append(&mut seed, *string::bytes(name)); + assert!(name.length() <= MAX_TOKEN_NAME_LENGTH, error::out_of_range(ETOKEN_NAME_TOO_LONG)); + let seed = *collection.bytes(); + seed.append(b"::"); + seed.append(*name.bytes()); seed } public fun create_token_name_with_seed(collection: &String, name: &String, seed: &String): vector { - assert!(string::length(seed) <= MAX_TOKEN_SEED_LENGTH, error::out_of_range(ESEED_TOO_LONG)); + assert!(seed.length() <= MAX_TOKEN_SEED_LENGTH, error::out_of_range(ESEED_TOO_LONG)); let seeds = create_token_seed(collection, name); - vector::append(&mut seeds, *string::bytes(seed)); + seeds.append(*seed.bytes()); seeds } @@ -466,10 +613,10 @@ module aptos_token_objects::token { /// Extracts the tokens address from a BurnRef. 
public fun address_from_burn_ref(ref: &BurnRef): address { - if (option::is_some(&ref.inner)) { - object::address_from_delete_ref(option::borrow(&ref.inner)) + if (ref.inner.is_some()) { + object::address_from_delete_ref(ref.inner.borrow()) } else { - *option::borrow(&ref.self) + *ref.self.borrow() } } @@ -481,7 +628,7 @@ module aptos_token_objects::token { exists(token_address), error::not_found(ETOKEN_DOES_NOT_EXIST), ); - borrow_global(token_address) + &Token[token_address] } #[view] @@ -511,7 +658,7 @@ module aptos_token_objects::token { // fun name_snapshot(token: &Object): AggregatorSnapshot acquires Token, TokenIdentifiers { // let token_address = object::object_address(token); // if (exists(token_address)) { - // aggregator_v2::copy_snapshot(&borrow_global(token_address).name) + // aggregator_v2::copy_snapshot(&TokenIdentifiers[token_address].name) // } else { // aggregator_v2::create_snapshot(borrow(token).name) // } @@ -523,7 +670,7 @@ module aptos_token_objects::token { public fun name(token: Object): String acquires Token, TokenIdentifiers { let token_address = object::object_address(&token); if (exists(token_address)) { - aggregator_v2::read_derived_string(&borrow_global(token_address).name) + aggregator_v2::read_derived_string(&TokenIdentifiers[token_address].name) } else { borrow(&token).name } @@ -538,7 +685,7 @@ module aptos_token_objects::token { public fun royalty(token: Object): Option acquires Token { borrow(&token); let royalty = royalty::get(token); - if (option::is_some(&royalty)) { + if (royalty.is_some()) { royalty } else { let creator = creator(token); @@ -556,7 +703,7 @@ module aptos_token_objects::token { // fun index_snapshot(token: &Object): AggregatorSnapshot acquires Token, TokenIdentifiers { // let token_address = object::object_address(token); // if (exists(token_address)) { - // aggregator_v2::copy_snapshot(&borrow_global(token_address).index) + // aggregator_v2::copy_snapshot(&TokenIdentifiers[token_address].index) // } else { // aggregator_v2::create_snapshot(borrow(token).index) // } @@ -568,7 +715,7 @@ module aptos_token_objects::token { public fun index(token: Object): u64 acquires Token, TokenIdentifiers { let token_address = object::object_address(&token); if (exists(token_address)) { - aggregator_v2::read_snapshot(&borrow_global(token_address).index) + aggregator_v2::read_snapshot(&TokenIdentifiers[token_address].index) } else { borrow(&token).index } @@ -581,18 +728,18 @@ module aptos_token_objects::token { exists(mutator_ref.self), error::not_found(ETOKEN_DOES_NOT_EXIST), ); - borrow_global_mut(mutator_ref.self) + &mut Token[mutator_ref.self] } public fun burn(burn_ref: BurnRef) acquires Token, TokenIdentifiers { - let (addr, previous_owner) = if (option::is_some(&burn_ref.inner)) { - let delete_ref = option::extract(&mut burn_ref.inner); + let (addr, previous_owner) = if (burn_ref.inner.is_some()) { + let delete_ref = burn_ref.inner.extract(); let addr = object::address_from_delete_ref(&delete_ref); let previous_owner = object::owner(object::address_to_object(addr)); object::delete(delete_ref); (addr, previous_owner) } else { - let addr = option::extract(&mut burn_ref.self); + let addr = burn_ref.self.extract(); let previous_owner = object::owner(object::address_to_object(addr)); (addr, previous_owner) }; @@ -625,7 +772,7 @@ module aptos_token_objects::token { } public fun set_description(mutator_ref: &MutatorRef, description: String) acquires Token { - assert!(string::length(&description) <= MAX_DESCRIPTION_LENGTH, 
error::out_of_range(EDESCRIPTION_TOO_LONG)); + assert!(description.length() <= MAX_DESCRIPTION_LENGTH, error::out_of_range(EDESCRIPTION_TOO_LONG)); let token = borrow_mut(mutator_ref); if (std::features::module_event_migration_enabled()) { event::emit(Mutation { @@ -634,25 +781,26 @@ module aptos_token_objects::token { old_value: token.description, new_value: description }) + } else { + event::emit_event( + &mut token.mutation_events, + MutationEvent { + mutated_field_name: string::utf8(b"description"), + old_value: token.description, + new_value: description + }, + ); }; - event::emit_event( - &mut token.mutation_events, - MutationEvent { - mutated_field_name: string::utf8(b"description"), - old_value: token.description, - new_value: description - }, - ); token.description = description; } public fun set_name(mutator_ref: &MutatorRef, name: String) acquires Token, TokenIdentifiers { - assert!(string::length(&name) <= MAX_TOKEN_NAME_LENGTH, error::out_of_range(ETOKEN_NAME_TOO_LONG)); + assert!(name.length() <= MAX_TOKEN_NAME_LENGTH, error::out_of_range(ETOKEN_NAME_TOO_LONG)); let token = borrow_mut(mutator_ref); let old_name = if (exists(mutator_ref.self)) { - let token_concurrent = borrow_global_mut(mutator_ref.self); + let token_concurrent = &mut TokenIdentifiers[mutator_ref.self]; let old_name = aggregator_v2::read_derived_string(&token_concurrent.name); token_concurrent.name = aggregator_v2::create_derived_string(name); old_name @@ -669,19 +817,20 @@ module aptos_token_objects::token { old_value: old_name, new_value: name }) + } else { + event::emit_event( + &mut token.mutation_events, + MutationEvent { + mutated_field_name: string::utf8(b"name"), + old_value: old_name, + new_value: name + }, + ); }; - event::emit_event( - &mut token.mutation_events, - MutationEvent { - mutated_field_name: string::utf8(b"name"), - old_value: old_name, - new_value: name - }, - ); } public fun set_uri(mutator_ref: &MutatorRef, uri: String) acquires Token { - assert!(string::length(&uri) <= MAX_URI_LENGTH, error::out_of_range(EURI_TOO_LONG)); + assert!(uri.length() <= MAX_URI_LENGTH, error::out_of_range(EURI_TOO_LONG)); let token = borrow_mut(mutator_ref); if (std::features::module_event_migration_enabled()) { event::emit(Mutation { @@ -690,15 +839,16 @@ module aptos_token_objects::token { old_value: token.uri, new_value: uri, }) + } else { + event::emit_event( + &mut token.mutation_events, + MutationEvent { + mutated_field_name: string::utf8(b"uri"), + old_value: token.uri, + new_value: uri, + }, + ); }; - event::emit_event( - &mut token.mutation_events, - MutationEvent { - mutated_field_name: string::utf8(b"uri"), - old_value: token.uri, - new_value: uri, - }, - ); token.uri = uri; } @@ -721,6 +871,27 @@ module aptos_token_objects::token { assert!(option::some(expected_royalty) == royalty(token), 2); } + #[test(creator = @0x123, trader = @0x456, aptos_framework = @aptos_framework)] + fun test_create_and_transfer_token_as_collection_owner(creator: &signer, trader: &signer, aptos_framework: &signer) acquires Token { + features::change_feature_flags_for_testing(aptos_framework, vector[features::get_collection_owner_feature()], vector[]); + let collection_name = string::utf8(b"collection name"); + let token_name = string::utf8(b"token name"); + + let extend_ref = create_collection_as_collection_owner_helper(creator, collection_name, 1); + let collection = get_collection_from_ref(&extend_ref); + create_named_token_as_collection_owner_helper(creator, collection, token_name); + + let creator_address = 
signer::address_of(creator); + let token_addr = create_token_address(&creator_address, &collection_name, &token_name); + let token = object::address_to_object(token_addr); + assert!(object::owner(token) == creator_address, 1); + object::transfer(creator, token, signer::address_of(trader)); + assert!(object::owner(token) == signer::address_of(trader), 1); + + let expected_royalty = royalty::create(25, 10000, creator_address); + assert!(option::some(expected_royalty) == royalty(token), 2); + } + #[test(creator = @0x123, trader = @0x456)] #[expected_failure(abort_code = 0x40002, location = aptos_token_objects::token)] fun test_create_token_non_creator(creator: &signer, trader: &signer) { @@ -732,6 +903,18 @@ module aptos_token_objects::token { ); } + #[test(creator = @0x123, trader = @0x456, aptos_framework = @aptos_framework)] + #[expected_failure(abort_code = 0x40008, location = aptos_token_objects::token)] + fun test_create_token_non_collection_owner(creator: &signer, trader: &signer, aptos_framework: &signer) { + features::change_feature_flags_for_testing(aptos_framework, vector[features::get_collection_owner_feature()], vector[]); + let constructor_ref = &create_fixed_collection_as_collection_owner(creator, string::utf8(b"collection name"), 5); + let collection = get_collection_from_ref(&object::generate_extend_ref(constructor_ref)); + create_token_as_collection_owner( + trader, collection, string::utf8(b"token description"), string::utf8(b"token name"), + option::some(royalty::create(25, 10000, signer::address_of(creator))), string::utf8(b"uri"), + ); + } + #[test(creator = @0x123, trader = @0x456)] #[expected_failure(abort_code = 0x40002, location = aptos_token_objects::token)] fun test_create_named_token_non_creator(creator: &signer, trader: &signer) { @@ -740,6 +923,15 @@ module aptos_token_objects::token { create_token_with_collection_helper(trader, collection, string::utf8(b"token name")); } + #[test(creator = @0x123, trader = @0x456, aptos_framework = @aptos_framework)] + #[expected_failure(abort_code = 0x40008, location = aptos_token_objects::token)] + fun test_create_named_token_non_collection_owner(creator: &signer, trader: &signer, aptos_framework: &signer) { + features::change_feature_flags_for_testing(aptos_framework, vector[features::get_collection_owner_feature()], vector[]); + let constructor_ref = &create_fixed_collection_as_collection_owner(creator, string::utf8(b"collection name"), 5); + let collection = get_collection_from_ref(&object::generate_extend_ref(constructor_ref)); + create_named_token_as_collection_owner_helper(trader, collection, string::utf8(b"token name")); + } + #[test(creator = @0x123, trader = @0x456)] #[expected_failure(abort_code = 0x40002, location = aptos_token_objects::token)] fun test_create_named_token_object_non_creator(creator: &signer, trader: &signer) { @@ -762,6 +954,18 @@ module aptos_token_objects::token { ); } + #[test(creator = @0x123, trader = @0x456, aptos_framework = @aptos_framework)] + #[expected_failure(abort_code = 0x40008, location = aptos_token_objects::token)] + fun test_create_named_token_from_seed_non_collection_owner(creator: &signer, trader: &signer, aptos_framework: &signer) { + features::change_feature_flags_for_testing(aptos_framework, vector[features::get_collection_owner_feature()], vector[]); + let constructor_ref = &create_fixed_collection_as_collection_owner(creator, string::utf8(b"collection name"), 5); + let collection = get_collection_from_ref(&object::generate_extend_ref(constructor_ref)); + 
create_named_token_as_collection_owner( + trader, collection, string::utf8(b"token description"), string::utf8(b"token name"), + option::some(royalty::create(25, 10000, signer::address_of(creator))), string::utf8(b"uri"), + ); + } + #[test(creator = @0x123, trader = @0x456)] fun test_create_and_transfer_token_with_seed(creator: &signer, trader: &signer) acquires Token { let collection_name = string::utf8(b"collection name"); @@ -784,6 +988,45 @@ module aptos_token_objects::token { assert!(option::some(expected_royalty) == royalty(token), 2); } + #[test(creator = @0x123, trader = @0x456, aptos_framework = @aptos_framework)] + #[expected_failure(abort_code = 0x40008, location = aptos_token_objects::token)] + fun test_create_token_after_transferring_collection(creator: &signer, trader: &signer, aptos_framework: &signer) { + features::change_feature_flags_for_testing(aptos_framework, vector[features::get_collection_owner_feature()], vector[]); + let constructor_ref = &create_fixed_collection_as_collection_owner(creator, string::utf8(b"collection name"), 5); + let collection = get_collection_from_ref(&object::generate_extend_ref(constructor_ref)); + create_token_as_collection_owner( + creator, collection, string::utf8(b"token description"), string::utf8(b"token name"), + option::some(royalty::create(25, 10000, signer::address_of(creator))), string::utf8(b"uri"), + ); + + object::transfer(creator, collection, signer::address_of(trader)); + + // This should fail as the collection is no longer owned by the creator. + create_token_as_collection_owner( + creator, collection, string::utf8(b"token description"), string::utf8(b"token name"), + option::some(royalty::create(25, 10000, signer::address_of(creator))), string::utf8(b"uri"), + ); + } + + #[test(creator = @0x123, trader = @0x456, aptos_framework = @aptos_framework)] + fun create_token_works_with_new_collection_owner(creator: &signer, trader: &signer, aptos_framework: &signer) { + features::change_feature_flags_for_testing(aptos_framework, vector[features::get_collection_owner_feature()], vector[]); + let constructor_ref = &create_fixed_collection_as_collection_owner(creator, string::utf8(b"collection name"), 5); + let collection = get_collection_from_ref(&object::generate_extend_ref(constructor_ref)); + create_token_as_collection_owner( + creator, collection, string::utf8(b"token description"), string::utf8(b"token name"), + option::some(royalty::create(25, 10000, signer::address_of(creator))), string::utf8(b"uri"), + ); + + object::transfer(creator, collection, signer::address_of(trader)); + + // This should pass as `trader` is the new collection owner + create_token_as_collection_owner( + trader, collection, string::utf8(b"token description"), string::utf8(b"token name"), + option::some(royalty::create(25, 10000, signer::address_of(creator))), string::utf8(b"uri"), + ); + } + #[test(creator = @0x123)] fun test_collection_royalty(creator: &signer) acquires Token { let collection_name = string::utf8(b"collection name"); @@ -1027,13 +1270,13 @@ module aptos_token_objects::token { let token_2_ref = create_numbered_token_helper(creator, collection, token_name); assert!(name(object::object_from_constructor_ref(&token_2_ref)) == std::string::utf8(b"token name2"), 1); - assert!(vector::length(&event::emitted_events()) == 2, 0); + assert!(event::emitted_events().length() == 2, 0); let burn_ref = generate_burn_ref(&token_2_ref); let token_addr = object::address_from_constructor_ref(&token_2_ref); assert!(exists(token_addr), 0); burn(burn_ref); - 
assert!(vector::length(&event::emitted_events()) == 1, 0); + assert!(event::emitted_events().length() == 1, 0); } #[test(creator = @0x123)] @@ -1059,6 +1302,12 @@ module aptos_token_objects::token { object::generate_extend_ref(&constructor_ref) } + #[test_only] + fun create_collection_as_collection_owner_helper(creator: &signer, collection_name: String, max_supply: u64): ExtendRef { + let constructor_ref = create_fixed_collection_as_collection_owner(creator, collection_name, max_supply); + object::generate_extend_ref(&constructor_ref) + } + #[test_only] fun create_fixed_collection(creator: &signer, collection_name: String, max_supply: u64): ConstructorRef { collection::create_fixed_collection( @@ -1071,6 +1320,22 @@ module aptos_token_objects::token { ) } + #[test_only] + fun create_fixed_collection_as_collection_owner( + creator: &signer, + collection_name: String, + max_supply: u64, + ): ConstructorRef { + collection::create_fixed_collection_as_owner( + creator, + string::utf8(b"collection description as owner"), + max_supply, + collection_name, + option::none(), + string::utf8(b"collection uri as owner"), + ) + } + #[test_only] fun create_token_helper(creator: &signer, collection_name: String, token_name: String): ConstructorRef { create_named_token( @@ -1084,7 +1349,11 @@ module aptos_token_objects::token { } #[test_only] - fun create_token_with_collection_helper(creator: &signer, collection: Object, token_name: String): ConstructorRef { + fun create_token_with_collection_helper( + creator: &signer, + collection: Object, + token_name: String + ): ConstructorRef { create_named_token_object( creator, collection, @@ -1096,7 +1365,28 @@ module aptos_token_objects::token { } #[test_only] - fun create_token_object_with_seed_helper(creator: &signer, collection: Object, token_name: String, seed: String): ConstructorRef { + fun create_named_token_as_collection_owner_helper( + creator: &signer, + collection: Object, + token_name: String + ): ConstructorRef { + create_named_token_as_collection_owner( + creator, + collection, + string::utf8(b"token description"), + token_name, + option::some(royalty::create(25, 10000, signer::address_of(creator))), + string::utf8(b"uri"), + ) + } + + #[test_only] + fun create_token_object_with_seed_helper( + creator: &signer, + collection: Object, + token_name: String, + seed: String + ): ConstructorRef { create_named_token_from_seed( creator, collection, @@ -1109,7 +1399,11 @@ module aptos_token_objects::token { } #[test_only] - fun create_numbered_token_helper(creator: &signer, collection: Object, token_prefix: String): ConstructorRef { + fun create_numbered_token_helper( + creator: &signer, + collection: Object, + token_prefix: String + ): ConstructorRef { create_numbered_token_object( creator, collection, diff --git a/aptos-move/framework/aptos-token/doc/property_map.md b/aptos-move/framework/aptos-token/doc/property_map.md index a5a25d7be62c7..ed6f8db027dab 100644 --- a/aptos-move/framework/aptos-token/doc/property_map.md +++ b/aptos-move/framework/aptos-token/doc/property_map.md @@ -245,23 +245,20 @@ The maximal number of property that can be stored in property map values: vector<vector<u8>>, types: vector<String> ): PropertyMap { - let length = vector::length(&keys); + let length = keys.length(); assert!(length <= MAX_PROPERTY_MAP_SIZE, error::invalid_argument(EPROPERTY_NUMBER_EXCEED_LIMIT)); - assert!(length == vector::length(&values), error::invalid_argument(EKEY_COUNT_NOT_MATCH_VALUE_COUNT)); - assert!(length == vector::length(&types), 
error::invalid_argument(EKEY_COUNT_NOT_MATCH_TYPE_COUNT)); + assert!(length == values.length(), error::invalid_argument(EKEY_COUNT_NOT_MATCH_VALUE_COUNT)); + assert!(length == types.length(), error::invalid_argument(EKEY_COUNT_NOT_MATCH_TYPE_COUNT)); let properties = empty(); - let i = 0; - while (i < length) { - let key = *vector::borrow(&keys, i); - assert!(string::length(&key) <= MAX_PROPERTY_NAME_LENGTH, error::invalid_argument(EPROPERTY_MAP_NAME_TOO_LONG)); - simple_map::add( - &mut properties.map, + for (i in 0..length) { + let key = keys[i]; + assert!(key.length() <= MAX_PROPERTY_NAME_LENGTH, error::invalid_argument(EPROPERTY_MAP_NAME_TOO_LONG)); + properties.map.add( key, - PropertyValue { value: *vector::borrow(&values, i), type: *vector::borrow(&types, i) } + PropertyValue { value: values[i], type: types[i] } ); - i = i + 1; }; properties } @@ -291,19 +288,17 @@ Create property map directly from key and property value keys: vector<String>, values: vector<PropertyValue> ): PropertyMap { - let length = vector::length(&keys); + let length = keys.length(); assert!(length <= MAX_PROPERTY_MAP_SIZE, error::invalid_argument(EPROPERTY_NUMBER_EXCEED_LIMIT)); - assert!(length == vector::length(&values), error::invalid_argument(EKEY_COUNT_NOT_MATCH_VALUE_COUNT)); + assert!(length == values.length(), error::invalid_argument(EKEY_COUNT_NOT_MATCH_VALUE_COUNT)); let properties = empty(); - let i = 0; - while (i < length) { - let key = *vector::borrow(&keys, i); - let val = *vector::borrow(&values, i); - assert!(string::length(&key) <= MAX_PROPERTY_NAME_LENGTH, error::invalid_argument(EPROPERTY_MAP_NAME_TOO_LONG)); - add(&mut properties, key, val); - i = i + 1; + for (i in 0..length) { + let key = keys[i]; + let val = values[i]; + assert!(key.length() <= MAX_PROPERTY_NAME_LENGTH, error::invalid_argument(EPROPERTY_MAP_NAME_TOO_LONG)); + properties.add(key, val); }; properties } @@ -345,7 +340,7 @@ Create property map directly from key and property value -
-public fun contains_key(map: &property_map::PropertyMap, key: &string::String): bool
+public fun contains_key(self: &property_map::PropertyMap, key: &string::String): bool
 
@@ -354,8 +349,8 @@ Create property map directly from key and property value

Implementation

-public fun contains_key(map: &PropertyMap, key: &String): bool {
-    simple_map::contains_key(&map.map, key)
+public fun contains_key(self: &PropertyMap, key: &String): bool {
+    self.map.contains_key(key)
 }
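A minimal sketch (not part of this patch) of how the two call styles above relate: the receiver-style form is syntactic sugar, so inside the module both spellings resolve to the same function. `has_property` is a hypothetical helper used only for illustration, assuming the compiler-v2 receiver-call syntax this change relies on.

// Hypothetical helper, for illustration only: both calls resolve to contains_key.
fun has_property(map: &PropertyMap, key: &String): bool {
    let qualified = contains_key(map, key); // fully qualified (pre-change) style
    let receiver = map.contains_key(key);   // receiver style used after this change
    qualified && receiver
}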
 
@@ -369,7 +364,7 @@ Create property map directly from key and property value

-public fun add(map: &mut property_map::PropertyMap, key: string::String, value: property_map::PropertyValue)
+public fun add(self: &mut property_map::PropertyMap, key: string::String, value: property_map::PropertyValue)
 
@@ -378,10 +373,10 @@ Create property map directly from key and property value

Implementation

-public fun add(map: &mut PropertyMap, key: String, value: PropertyValue) {
-    assert!(string::length(&key) <= MAX_PROPERTY_NAME_LENGTH, error::invalid_argument(EPROPERTY_MAP_NAME_TOO_LONG));
-    assert!(simple_map::length(&map.map) < MAX_PROPERTY_MAP_SIZE, error::invalid_state(EPROPERTY_NUMBER_EXCEED_LIMIT));
-    simple_map::add(&mut map.map, key, value);
+public fun add(self: &mut PropertyMap, key: String, value: PropertyValue) {
+    assert!(key.length() <= MAX_PROPERTY_NAME_LENGTH, error::invalid_argument(EPROPERTY_MAP_NAME_TOO_LONG));
+    assert!(self.map.length() < MAX_PROPERTY_MAP_SIZE, error::invalid_state(EPROPERTY_NUMBER_EXCEED_LIMIT));
+    self.map.add(key, value);
 }
 
@@ -395,7 +390,7 @@ Create property map directly from key and property value

-public fun length(map: &property_map::PropertyMap): u64
+public fun length(self: &property_map::PropertyMap): u64
 
@@ -404,8 +399,8 @@ Create property map directly from key and property value

Implementation

-public fun length(map: &PropertyMap): u64 {
-    simple_map::length(&map.map)
+public fun length(self: &PropertyMap): u64 {
+    self.map.length()
 }
 
@@ -419,7 +414,7 @@ Create property map directly from key and property value

-public fun borrow(map: &property_map::PropertyMap, key: &string::String): &property_map::PropertyValue
+public fun borrow(self: &property_map::PropertyMap, key: &string::String): &property_map::PropertyValue
 
@@ -428,10 +423,10 @@ Create property map directly from key and property value

Implementation

-public fun borrow(map: &PropertyMap, key: &String): &PropertyValue {
-    let found = contains_key(map, key);
+public fun borrow(self: &PropertyMap, key: &String): &PropertyValue {
+    let found = self.contains_key(key);
     assert!(found, EPROPERTY_NOT_EXIST);
-    simple_map::borrow(&map.map, key)
+    self.map.borrow(key)
 }
 
@@ -446,7 +441,7 @@ Create property map directly from key and property value

Return all the keys in the property map in the order they are added.

-public fun keys(map: &property_map::PropertyMap): vector<string::String>
+public fun keys(self: &property_map::PropertyMap): vector<string::String>
 
@@ -455,8 +450,8 @@ Return all the keys in the property map in the order they are added.

Implementation

-public fun keys(map: &PropertyMap): vector<String> {
-    simple_map::keys(&map.map)
+public fun keys(self: &PropertyMap): vector<String> {
+    self.map.keys()
 }
 
@@ -471,7 +466,7 @@ Return all the keys in the property map in the order they are added. Return the types of all properties in the property map in the order they are added. -
public fun types(map: &property_map::PropertyMap): vector<string::String>
+
public fun types(self: &property_map::PropertyMap): vector<string::String>
 
@@ -480,9 +475,8 @@ Return the types of all properties in the property map in the order they are add

Implementation

-public fun types(map: &PropertyMap): vector<String> {
-    vector::map_ref(&simple_map::values(&map.map), |v| {
-        let v: &PropertyValue = v;
+public fun types(self: &PropertyMap): vector<String> {
+    self.map.values().map_ref(|v| {
         v.type
     })
 }
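The `values().map_ref(|v| ...)` chain above also drops the explicit `let v: &PropertyValue = v;` ascription, since the lambda parameter's type is now inferred. A minimal sketch of the same pattern (not part of this patch; `value_sizes` is a hypothetical helper), assuming the framework's `simple_map::values` and `vector::map_ref`:

// Hypothetical helper, for illustration only: maps each stored PropertyValue to
// the byte length of its BCS-encoded value, using the same receiver + lambda style.
fun value_sizes(self: &PropertyMap): vector<u64> {
    self.map.values().map_ref(|v| {
        v.value.length()
    })
}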
@@ -499,7 +493,7 @@ Return the types of all properties in the property map in the order they are add
 Return the values of all properties in the property map in the order they are added.
 
 
-
public fun values(map: &property_map::PropertyMap): vector<vector<u8>>
+
public fun values(self: &property_map::PropertyMap): vector<vector<u8>>
 
@@ -508,9 +502,8 @@ Return the values of all properties in the property map in the order they are ad Implementation -
public fun values(map: &PropertyMap): vector<vector<u8>> {
-    vector::map_ref(&simple_map::values(&map.map), |v| {
-        let v: &PropertyValue = v;
+
public fun values(self: &PropertyMap): vector<vector<u8>> {
+    self.map.values().map_ref(|v| {
         v.value
     })
 }
@@ -526,7 +519,7 @@ Return the values of all properties in the property map in the order they are ad
 
 
 
-
public fun read_string(map: &property_map::PropertyMap, key: &string::String): string::String
+
public fun read_string(self: &property_map::PropertyMap, key: &string::String): string::String
 
@@ -535,8 +528,8 @@ Return the values of all properties in the property map in the order they are ad Implementation -
public fun read_string(map: &PropertyMap, key: &String): String {
-    let prop = borrow(map, key);
+
public fun read_string(self: &PropertyMap, key: &String): String {
+    let prop = self.borrow(key);
     assert!(prop.type == string::utf8(b"0x1::string::String"), error::invalid_state(ETYPE_NOT_MATCH));
     from_bcs::to_string(prop.value)
 }
@@ -552,7 +545,7 @@ Return the values of all properties in the property map in the order they are ad
 
 
 
-
public fun read_u8(map: &property_map::PropertyMap, key: &string::String): u8
+
public fun read_u8(self: &property_map::PropertyMap, key: &string::String): u8
 
@@ -561,8 +554,8 @@ Return the values of all properties in the property map in the order they are ad Implementation -
public fun read_u8(map: &PropertyMap, key: &String): u8 {
-    let prop = borrow(map, key);
+
public fun read_u8(self: &PropertyMap, key: &String): u8 {
+    let prop = self.borrow(key);
     assert!(prop.type == string::utf8(b"u8"), error::invalid_state(ETYPE_NOT_MATCH));
     from_bcs::to_u8(prop.value)
 }
@@ -578,7 +571,7 @@ Return the values of all properties in the property map in the order they are ad
 
 
 
-
public fun read_u64(map: &property_map::PropertyMap, key: &string::String): u64
+
public fun read_u64(self: &property_map::PropertyMap, key: &string::String): u64
 
@@ -587,8 +580,8 @@ Return the values of all properties in the property map in the order they are ad Implementation -
public fun read_u64(map: &PropertyMap, key: &String): u64 {
-    let prop = borrow(map, key);
+
public fun read_u64(self: &PropertyMap, key: &String): u64 {
+    let prop = self.borrow(key);
     assert!(prop.type == string::utf8(b"u64"), error::invalid_state(ETYPE_NOT_MATCH));
     from_bcs::to_u64(prop.value)
 }
@@ -604,7 +597,7 @@ Return the values of all properties in the property map in the order they are ad
 
 
 
-
public fun read_address(map: &property_map::PropertyMap, key: &string::String): address
+
public fun read_address(self: &property_map::PropertyMap, key: &string::String): address
 
@@ -613,8 +606,8 @@ Return the values of all properties in the property map in the order they are ad Implementation -
public fun read_address(map: &PropertyMap, key: &String): address {
-    let prop = borrow(map, key);
+
public fun read_address(self: &PropertyMap, key: &String): address {
+    let prop = self.borrow(key);
     assert!(prop.type == string::utf8(b"address"), error::invalid_state(ETYPE_NOT_MATCH));
     from_bcs::to_address(prop.value)
 }
@@ -630,7 +623,7 @@ Return the values of all properties in the property map in the order they are ad
 
 
 
-
public fun read_u128(map: &property_map::PropertyMap, key: &string::String): u128
+
public fun read_u128(self: &property_map::PropertyMap, key: &string::String): u128
 
@@ -639,8 +632,8 @@ Return the values of all properties in the property map in the order they are ad Implementation -
public fun read_u128(map: &PropertyMap, key: &String): u128 {
-    let prop = borrow(map, key);
+
public fun read_u128(self: &PropertyMap, key: &String): u128 {
+    let prop = self.borrow(key);
     assert!(prop.type == string::utf8(b"u128"), error::invalid_state(ETYPE_NOT_MATCH));
     from_bcs::to_u128(prop.value)
 }
@@ -656,7 +649,7 @@ Return the values of all properties in the property map in the order they are ad
 
 
 
-
public fun read_bool(map: &property_map::PropertyMap, key: &string::String): bool
+
public fun read_bool(self: &property_map::PropertyMap, key: &string::String): bool
 
@@ -665,8 +658,8 @@ Return the values of all properties in the property map in the order they are ad Implementation -
public fun read_bool(map: &PropertyMap, key: &String): bool {
-    let prop = borrow(map, key);
+
public fun read_bool(self: &PropertyMap, key: &String): bool {
+    let prop = self.borrow(key);
     assert!(prop.type == string::utf8(b"bool"), error::invalid_state(ETYPE_NOT_MATCH));
     from_bcs::to_bool(prop.value)
 }
@@ -682,7 +675,7 @@ Return the values of all properties in the property map in the order they are ad
 
 
 
-
public fun borrow_value(property: &property_map::PropertyValue): vector<u8>
+
public fun borrow_value(self: &property_map::PropertyValue): vector<u8>
 
@@ -691,8 +684,8 @@ Return the values of all properties in the property map in the order they are ad Implementation -
public fun borrow_value(property: &PropertyValue): vector<u8> {
-    property.value
+
public fun borrow_value(self: &PropertyValue): vector<u8> {
+    self.value
 }
 
@@ -706,7 +699,7 @@ Return the values of all properties in the property map in the order they are ad -
public fun borrow_type(property: &property_map::PropertyValue): string::String
+
public fun borrow_type(self: &property_map::PropertyValue): string::String
 
@@ -715,8 +708,8 @@ Return the values of all properties in the property map in the order they are ad Implementation -
public fun borrow_type(property: &PropertyValue): String {
-    property.type
+
public fun borrow_type(self: &PropertyValue): String {
+    self.type
 }
 
@@ -730,7 +723,7 @@ Return the values of all properties in the property map in the order they are ad -
public fun remove(map: &mut property_map::PropertyMap, key: &string::String): (string::String, property_map::PropertyValue)
+
public fun remove(self: &mut property_map::PropertyMap, key: &string::String): (string::String, property_map::PropertyValue)
 
@@ -740,12 +733,12 @@ Return the values of all properties in the property map in the order they are ad
public fun remove(
-    map: &mut PropertyMap,
+    self: &mut PropertyMap,
     key: &String
 ): (String, PropertyValue) {
-    let found = contains_key(map, key);
+    let found = self.contains_key(key);
     assert!(found, error::not_found(EPROPERTY_NOT_EXIST));
-    simple_map::remove(&mut map.map, key)
+    self.map.remove(key)
 }
 
@@ -761,7 +754,7 @@ Update the property in the existing property map

Allow updating existing keys' value and add new key-value pairs

-public fun update_property_map(map: &mut property_map::PropertyMap, keys: vector<string::String>, values: vector<vector<u8>>, types: vector<string::String>)
+public fun update_property_map(self: &mut property_map::PropertyMap, keys: vector<string::String>, values: vector<vector<u8>>, types: vector<string::String>)
 
@@ -771,30 +764,28 @@ Allow updating existing keys' value and add new key-value pairs
public fun update_property_map(
-    map: &mut PropertyMap,
+    self: &mut PropertyMap,
     keys: vector<String>,
     values: vector<vector<u8>>,
     types: vector<String>,
 ) {
-    let key_len = vector::length(&keys);
-    let val_len = vector::length(&values);
-    let typ_len = vector::length(&types);
+    let key_len = keys.length();
+    let val_len = values.length();
+    let typ_len = types.length();
     assert!(key_len == val_len, error::invalid_state(EKEY_COUNT_NOT_MATCH_VALUE_COUNT));
     assert!(key_len == typ_len, error::invalid_state(EKEY_COUNT_NOT_MATCH_TYPE_COUNT));
 
-    let i = 0;
-    while (i < key_len) {
-        let key = vector::borrow(&keys, i);
+    for (i in 0..key_len) {
+        let key = &keys[i];
         let prop_val = PropertyValue {
-            value: *vector::borrow(&values, i),
-            type: *vector::borrow(&types, i),
+            value: values[i],
+            type: types[i],
         };
-        if (contains_key(map, key)) {
-            update_property_value(map, key, prop_val);
+        if (self.contains_key(key)) {
+            self.update_property_value(key, prop_val);
         } else {
-            add(map, *key, prop_val);
+            self.add(*key, prop_val);
         };
-        i = i + 1;
     }
 }
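The `for (i in 0..key_len)` form above is the compiler-v2 range loop that replaces the manual counter. A minimal, self-contained sketch of the equivalence (not part of this patch; `sum_first_n` is a hypothetical example):

// Hypothetical example, for illustration only: both loop forms visit i = 0, 1, ..., n-1.
fun sum_first_n(values: vector<u64>, n: u64): u64 {
    let total = 0;
    for (i in 0..n) {          // replaces: let i = 0; while (i < n) { ...; i = i + 1; }
        total += values[i];    // index notation replaces *vector::borrow(&values, i)
    };
    total
}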
 
@@ -809,7 +800,7 @@ Allow updating existing keys' value and add new key-value pairs -
public fun update_property_value(map: &mut property_map::PropertyMap, key: &string::String, value: property_map::PropertyValue)
+
public fun update_property_value(self: &mut property_map::PropertyMap, key: &string::String, value: property_map::PropertyValue)
 
@@ -819,11 +810,11 @@ Allow updating existing keys' value and add new key-value pairs
public fun update_property_value(
-    map: &mut PropertyMap,
+    self: &mut PropertyMap,
     key: &String,
     value: PropertyValue
 ) {
-    let property_val = simple_map::borrow_mut(&mut map.map, key);
+    let property_val = self.map.borrow_mut(key);
     *property_val = value;
 }
 
@@ -908,7 +899,7 @@ create a property value from generic type data
pragma verify = true;
 pragma aborts_if_is_strict;
 let MAX_PROPERTY_MAP_SIZE = 1000;
-let MAX_PROPERTY_NAME_LENGTH  = 128;
+let MAX_PROPERTY_NAME_LENGTH = 128;
 
@@ -927,8 +918,8 @@ create a property value from generic type data
pragma aborts_if_is_partial;
 let length = len(keys);
 aborts_if !(length <= MAX_PROPERTY_MAP_SIZE);
-aborts_if !(length == vector::length(values));
-aborts_if !(length == vector::length(types));
+aborts_if !(length == len(values));
+aborts_if !(length == len(types));
 
@@ -945,7 +936,7 @@ create a property value from generic type data
pragma aborts_if_is_partial;
-let length = vector::length(keys);
+let length = len(keys);
 aborts_if !(length <= MAX_PROPERTY_MAP_SIZE);
 aborts_if !(length == len(values));
 
@@ -973,7 +964,7 @@ create a property value from generic type data ### Function `contains_key` -
public fun contains_key(map: &property_map::PropertyMap, key: &string::String): bool
+
public fun contains_key(self: &property_map::PropertyMap, key: &string::String): bool
 
@@ -989,15 +980,15 @@ create a property value from generic type data ### Function `add` -
public fun add(map: &mut property_map::PropertyMap, key: string::String, value: property_map::PropertyValue)
+
public fun add(self: &mut property_map::PropertyMap, key: string::String, value: property_map::PropertyValue)
 
-
aborts_if !(string::length(key) <= MAX_PROPERTY_NAME_LENGTH);
-aborts_if !(!simple_map::spec_contains_key(map.map, key));
-aborts_if !(simple_map::spec_len(map.map) < MAX_PROPERTY_MAP_SIZE);
+
aborts_if !(key.length() <= MAX_PROPERTY_NAME_LENGTH);
+aborts_if !(!simple_map::spec_contains_key(self.map, key));
+aborts_if !(simple_map::spec_len(self.map) < MAX_PROPERTY_MAP_SIZE);
 
@@ -1007,7 +998,7 @@ create a property value from generic type data ### Function `length` -
public fun length(map: &property_map::PropertyMap): u64
+
public fun length(self: &property_map::PropertyMap): u64
 
@@ -1023,13 +1014,13 @@ create a property value from generic type data ### Function `borrow` -
public fun borrow(map: &property_map::PropertyMap, key: &string::String): &property_map::PropertyValue
+
public fun borrow(self: &property_map::PropertyMap, key: &string::String): &property_map::PropertyValue
 
-
aborts_if !simple_map::spec_contains_key(map.map, key);
+
aborts_if !simple_map::spec_contains_key(self.map, key);
 
@@ -1039,7 +1030,7 @@ create a property value from generic type data ### Function `keys` -
public fun keys(map: &property_map::PropertyMap): vector<string::String>
+
public fun keys(self: &property_map::PropertyMap): vector<string::String>
 
@@ -1055,7 +1046,7 @@ create a property value from generic type data ### Function `types` -
public fun types(map: &property_map::PropertyMap): vector<string::String>
+
public fun types(self: &property_map::PropertyMap): vector<string::String>
 
@@ -1071,7 +1062,7 @@ create a property value from generic type data ### Function `values` -
public fun values(map: &property_map::PropertyMap): vector<vector<u8>>
+
public fun values(self: &property_map::PropertyMap): vector<vector<u8>>
 
@@ -1087,7 +1078,7 @@ create a property value from generic type data ### Function `read_string` -
public fun read_string(map: &property_map::PropertyMap, key: &string::String): string::String
+
public fun read_string(self: &property_map::PropertyMap, key: &string::String): string::String
 
@@ -1096,9 +1087,9 @@ to prop.type
pragma aborts_if_is_partial;
-aborts_if !simple_map::spec_contains_key(map.map, key);
+aborts_if !simple_map::spec_contains_key(self.map, key);
 aborts_if !string::spec_internal_check_utf8(b"0x1::string::String");
-let prop = simple_map::spec_get(map.map, key);
+let prop = simple_map::spec_get(self.map, key);
 aborts_if prop.type != spec_utf8(b"0x1::string::String");
 aborts_if !aptos_std::from_bcs::deserializable<String>(prop.value);
 
@@ -1110,7 +1101,7 @@ to prop.type
fun spec_utf8(bytes: vector<u8>): String {
-   String{bytes}
+   String { bytes }
 }
 
@@ -1121,16 +1112,16 @@ to prop.type ### Function `read_u8` -
public fun read_u8(map: &property_map::PropertyMap, key: &string::String): u8
+
public fun read_u8(self: &property_map::PropertyMap, key: &string::String): u8
 
let str = b"u8";
-aborts_if !simple_map::spec_contains_key(map.map, key);
+aborts_if !simple_map::spec_contains_key(self.map, key);
 aborts_if !string::spec_internal_check_utf8(str);
-let prop = simple_map::spec_get(map.map, key);
+let prop = simple_map::spec_get(self.map, key);
 aborts_if prop.type != spec_utf8(str);
 aborts_if !aptos_std::from_bcs::deserializable<u8>(prop.value);
 
@@ -1142,16 +1133,16 @@ to prop.type ### Function `read_u64` -
public fun read_u64(map: &property_map::PropertyMap, key: &string::String): u64
+
public fun read_u64(self: &property_map::PropertyMap, key: &string::String): u64
 
let str = b"u64";
-aborts_if !simple_map::spec_contains_key(map.map, key);
+aborts_if !simple_map::spec_contains_key(self.map, key);
 aborts_if !string::spec_internal_check_utf8(str);
-let prop = simple_map::spec_get(map.map, key);
+let prop = simple_map::spec_get(self.map, key);
 aborts_if prop.type != spec_utf8(str);
 aborts_if !aptos_std::from_bcs::deserializable<u64>(prop.value);
 
@@ -1163,16 +1154,16 @@ to prop.type ### Function `read_address` -
public fun read_address(map: &property_map::PropertyMap, key: &string::String): address
+
public fun read_address(self: &property_map::PropertyMap, key: &string::String): address
 
let str = b"address";
-aborts_if !simple_map::spec_contains_key(map.map, key);
+aborts_if !simple_map::spec_contains_key(self.map, key);
 aborts_if !string::spec_internal_check_utf8(str);
-let prop = simple_map::spec_get(map.map, key);
+let prop = simple_map::spec_get(self.map, key);
 aborts_if prop.type != spec_utf8(str);
 aborts_if !aptos_std::from_bcs::deserializable<address>(prop.value);
 
@@ -1184,16 +1175,16 @@ to prop.type ### Function `read_u128` -
public fun read_u128(map: &property_map::PropertyMap, key: &string::String): u128
+
public fun read_u128(self: &property_map::PropertyMap, key: &string::String): u128
 
let str = b"u128";
-aborts_if !simple_map::spec_contains_key(map.map, key);
+aborts_if !simple_map::spec_contains_key(self.map, key);
 aborts_if !string::spec_internal_check_utf8(str);
-let prop = simple_map::spec_get(map.map, key);
+let prop = simple_map::spec_get(self.map, key);
 aborts_if prop.type != spec_utf8(str);
 aborts_if !aptos_std::from_bcs::deserializable<u128>(prop.value);
 
@@ -1205,16 +1196,16 @@ to prop.type ### Function `read_bool` -
public fun read_bool(map: &property_map::PropertyMap, key: &string::String): bool
+
public fun read_bool(self: &property_map::PropertyMap, key: &string::String): bool
 
let str = b"bool";
-aborts_if !simple_map::spec_contains_key(map.map, key);
+aborts_if !simple_map::spec_contains_key(self.map, key);
 aborts_if !string::spec_internal_check_utf8(str);
-let prop = simple_map::spec_get(map.map, key);
+let prop = simple_map::spec_get(self.map, key);
 aborts_if prop.type != spec_utf8(str);
 aborts_if !aptos_std::from_bcs::deserializable<bool>(prop.value);
 
@@ -1226,7 +1217,7 @@ to prop.type ### Function `borrow_value` -
public fun borrow_value(property: &property_map::PropertyValue): vector<u8>
+
public fun borrow_value(self: &property_map::PropertyValue): vector<u8>
 
@@ -1242,7 +1233,7 @@ to prop.type ### Function `borrow_type` -
public fun borrow_type(property: &property_map::PropertyValue): string::String
+
public fun borrow_type(self: &property_map::PropertyValue): string::String
 
@@ -1258,13 +1249,13 @@ to prop.type ### Function `remove` -
public fun remove(map: &mut property_map::PropertyMap, key: &string::String): (string::String, property_map::PropertyValue)
+
public fun remove(self: &mut property_map::PropertyMap, key: &string::String): (string::String, property_map::PropertyValue)
 
-
aborts_if !simple_map::spec_contains_key(map.map, key);
+
aborts_if !simple_map::spec_contains_key(self.map, key);
 
@@ -1274,7 +1265,7 @@ to prop.type ### Function `update_property_map` -
public fun update_property_map(map: &mut property_map::PropertyMap, keys: vector<string::String>, values: vector<vector<u8>>, types: vector<string::String>)
+
public fun update_property_map(self: &mut property_map::PropertyMap, keys: vector<string::String>, values: vector<vector<u8>>, types: vector<string::String>)
 
@@ -1295,13 +1286,13 @@ to prop.type ### Function `update_property_value` -
public fun update_property_value(map: &mut property_map::PropertyMap, key: &string::String, value: property_map::PropertyValue)
+
public fun update_property_value(self: &mut property_map::PropertyMap, key: &string::String, value: property_map::PropertyValue)
 
-
aborts_if !simple_map::spec_contains_key(map.map, key);
+
aborts_if !simple_map::spec_contains_key(self.map, key);
 
diff --git a/aptos-move/framework/aptos-token/doc/token.md b/aptos-move/framework/aptos-token/doc/token.md index af496bd937b73..bc568d413cbc8 100644 --- a/aptos-move/framework/aptos-token/doc/token.md +++ b/aptos-move/framework/aptos-token/doc/token.md @@ -19,17 +19,23 @@ Checkout our developer doc on our token standard https://aptos.dev/standards - [Struct `CollectionData`](#0x3_token_CollectionData) - [Struct `WithdrawCapability`](#0x3_token_WithdrawCapability) - [Struct `DepositEvent`](#0x3_token_DepositEvent) +- [Struct `TokenDeposit`](#0x3_token_TokenDeposit) - [Struct `Deposit`](#0x3_token_Deposit) - [Struct `WithdrawEvent`](#0x3_token_WithdrawEvent) - [Struct `Withdraw`](#0x3_token_Withdraw) +- [Struct `TokenWithdraw`](#0x3_token_TokenWithdraw) - [Struct `CreateTokenDataEvent`](#0x3_token_CreateTokenDataEvent) - [Struct `CreateTokenData`](#0x3_token_CreateTokenData) +- [Struct `TokenDataCreation`](#0x3_token_TokenDataCreation) - [Struct `MintTokenEvent`](#0x3_token_MintTokenEvent) - [Struct `MintToken`](#0x3_token_MintToken) +- [Struct `Mint`](#0x3_token_Mint) - [Struct `BurnTokenEvent`](#0x3_token_BurnTokenEvent) - [Struct `BurnToken`](#0x3_token_BurnToken) +- [Struct `Burn`](#0x3_token_Burn) - [Struct `MutateTokenPropertyMapEvent`](#0x3_token_MutateTokenPropertyMapEvent) - [Struct `MutateTokenPropertyMap`](#0x3_token_MutateTokenPropertyMap) +- [Struct `MutatePropertyMap`](#0x3_token_MutatePropertyMap) - [Struct `CreateCollectionEvent`](#0x3_token_CreateCollectionEvent) - [Struct `CreateCollection`](#0x3_token_CreateCollection) - [Constants](#@Constants_0) @@ -768,6 +774,47 @@ Set of data sent to the event stream during a receive + + + + +## Struct `TokenDeposit` + +Set of data sent to the event stream during a receive + + +
#[event]
+struct TokenDeposit has drop, store
+
+ + + +
+Fields
+
+account: address
+
+id: token::TokenId
+
+amount: u64
+
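TokenDeposit is one of the new module events that replace the deprecated handle-based event structs below. As elsewhere in this patch, emission of the new events is gated on the module-event-migration feature flag, with the legacy event handle used as the fallback; for reference, this is the pattern the burn path uses further down in this same diff (`owner`, `token_id`, `burned_amount`, and `token_store` are locals of the surrounding function):

if (std::features::module_event_migration_enabled()) {
    event::emit(Burn { account: owner, id: token_id, amount: burned_amount });
} else {
    event::emit_event<BurnTokenEvent>(
        &mut token_store.burn_events,
        BurnTokenEvent { id: token_id, amount: burned_amount }
    );
};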
@@ -778,6 +825,7 @@ Set of data sent to the event stream during a receive
#[event]
+#[deprecated]
 struct Deposit has drop, store
 
@@ -847,6 +895,7 @@ Set of data sent to the event stream during a withdrawal
#[event]
+#[deprecated]
 struct Withdraw has drop, store
 
@@ -872,6 +921,47 @@ Set of data sent to the event stream during a withdrawal + + + + +## Struct `TokenWithdraw` + +Set of data sent to the event stream during a withdrawal + + +
#[event]
+struct TokenWithdraw has drop, store
+
+ + + +
+Fields + + +
+
+account: address +
+
+ +
+
+id: token::TokenId +
+
+ +
+
+amount: u64 +
+
+ +
+
+ +
@@ -975,6 +1065,7 @@ token creation event id of token created
#[event]
+#[deprecated]
 struct CreateTokenData has drop, store
 
@@ -1060,6 +1151,106 @@ token creation event id of token created + + + + +## Struct `TokenDataCreation` + + + +
#[event]
+struct TokenDataCreation has drop, store
+
+ + + +
+Fields + + +
+
+creator: address +
+
+ +
+
+id: token::TokenDataId +
+
+ +
+
+description: string::String +
+
+ +
+
+maximum: u64 +
+
+ +
+
+uri: string::String +
+
+ +
+
+royalty_payee_address: address +
+
+ +
+
+royalty_points_denominator: u64 +
+
+ +
+
+royalty_points_numerator: u64 +
+
+ +
+
+name: string::String +
+
+ +
+
+mutability_config: token::TokenMutabilityConfig +
+
+ +
+
+property_keys: vector<string::String> +
+
+ +
+
+property_values: vector<vector<u8>> +
+
+ +
+
+property_types: vector<string::String> +
+
+ +
+
+ +
@@ -1103,6 +1294,7 @@ mint token event. This event triggered when creator adds more supply to existing
#[event]
+#[deprecated]
 struct MintToken has drop, store
 
@@ -1128,6 +1320,46 @@ mint token event. This event triggered when creator adds more supply to existing + + + + +## Struct `Mint` + + + +
#[event]
+struct Mint has drop, store
+
+ + + +
+Fields + + +
+
+creator: address +
+
+ +
+
+id: token::TokenDataId +
+
+ +
+
+amount: u64 +
+
+ +
+
+ +
@@ -1170,6 +1402,7 @@ mint token event. This event triggered when creator adds more supply to existing
#[event]
+#[deprecated]
 struct BurnToken has drop, store
 
@@ -1195,6 +1428,46 @@ mint token event. This event triggered when creator adds more supply to existing + + + + +## Struct `Burn` + + + +
#[event]
+struct Burn has drop, store
+
+ + + +
+Fields + + +
+
+account: address +
+
+ +
+
+id: token::TokenId +
+
+ +
+
+amount: u64 +
+
+ +
+
+ +
@@ -1255,6 +1528,7 @@ mint token event. This event triggered when creator adds more supply to existing
#[event]
+#[deprecated]
 struct MutateTokenPropertyMap has drop, store
 
@@ -1298,6 +1572,64 @@ mint token event. This event triggered when creator adds more supply to existing + + + + +## Struct `MutatePropertyMap` + + + +
#[event]
+struct MutatePropertyMap has drop, store
+
+ + + +
+Fields + + +
+
+account: address +
+
+ +
+
+old_id: token::TokenId +
+
+ +
+
+new_id: token::TokenId +
+
+ +
+
+keys: vector<string::String> +
+
+ +
+
+values: vector<vector<u8>> +
+
+ +
+
+types: vector<string::String> +
+
+ +
+
+ +
@@ -2078,7 +2410,6 @@ if the token property_version is not 0, we will just update the propertyMap and types: vector<String>, ) acquires Collections, TokenStore { assert!(signer::address_of(account) == creator, error::not_found(ENO_MUTATE_CAPABILITY)); - let i = 0; let token_id = create_token_id_raw( creator, collection_name, @@ -2086,9 +2417,8 @@ if the token property_version is not 0, we will just update the propertyMap and token_property_version, ); // give a new property_version for each token - while (i < amount) { + for (i in 0..amount) { mutate_one_token(account, token_owner, token_id, keys, values, types); - i = i + 1; }; }
@@ -2148,7 +2478,7 @@ if the token property_version is not 0, we will just update the propertyMap and
public entry fun opt_in_direct_transfer(account: &signer, opt_in: bool) acquires TokenStore {
     let addr = signer::address_of(account);
     initialize_token_store(account);
-    let opt_in_flag = &mut borrow_global_mut<TokenStore>(addr).direct_transfer;
+    let opt_in_flag = &mut TokenStore[addr].direct_transfer;
     *opt_in_flag = opt_in;
     token_event_store::emit_token_opt_in_event(account, opt_in);
 }
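The `TokenStore[addr]` form above is compiler-v2 resource index notation; inside the defining module it is equivalent to the `borrow_global`/`borrow_global_mut` builtins it replaces. A minimal sketch (not part of this patch; `direct_transfer_enabled` is a hypothetical helper):

// Hypothetical helper, for illustration only.
fun direct_transfer_enabled(addr: address): bool acquires TokenStore {
    // Equivalent to: borrow_global<TokenStore>(addr).direct_transfer
    let store = &TokenStore[addr];
    store.direct_transfer
}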
@@ -2227,54 +2557,49 @@ The token is owned at address owner
         error::not_found(ECOLLECTIONS_NOT_PUBLISHED),
     );
 
-    let collections = borrow_global_mut<Collections>(creator_address);
+    let collections = &mut Collections[creator_address];
     assert!(
-        table::contains(&collections.token_data, token_id.token_data_id),
+        collections.token_data.contains(token_id.token_data_id),
         error::not_found(ETOKEN_DATA_NOT_PUBLISHED),
     );
 
-    let token_data = table::borrow_mut(
-        &mut collections.token_data,
-        token_id.token_data_id,
-    );
+    let token_data = collections.token_data.borrow_mut(token_id.token_data_id);
 
     // The property should be explicitly set in the property_map for creator to burn the token
     assert!(
-        property_map::contains_key(&token_data.default_properties, &string::utf8(BURNABLE_BY_CREATOR)),
+        token_data.default_properties.contains_key(&string::utf8(BURNABLE_BY_CREATOR)),
         error::permission_denied(ECREATOR_CANNOT_BURN_TOKEN)
     );
 
-    let burn_by_creator_flag = property_map::read_bool(&token_data.default_properties, &string::utf8(BURNABLE_BY_CREATOR));
+    let burn_by_creator_flag = token_data.default_properties.read_bool(&string::utf8(BURNABLE_BY_CREATOR));
     assert!(burn_by_creator_flag, error::permission_denied(ECREATOR_CANNOT_BURN_TOKEN));
 
-    // Burn the tokens.
+    // Burn the tokens.
     let Token { id: _, amount: burned_amount, token_properties: _ } = withdraw_with_event_internal(owner, token_id, amount);
-    let token_store = borrow_global_mut<TokenStore>(owner);
+    let token_store = &mut TokenStore[owner];
     if (std::features::module_event_migration_enabled()) {
-        event::emit(BurnToken { id: token_id, amount: burned_amount });
+        event::emit(Burn { account: owner, id: token_id, amount: burned_amount });
+    } else {
+        event::emit_event<BurnTokenEvent>(
+            &mut token_store.burn_events,
+            BurnTokenEvent { id: token_id, amount: burned_amount }
+        );
     };
-    event::emit_event<BurnTokenEvent>(
-        &mut token_store.burn_events,
-        BurnTokenEvent { id: token_id, amount: burned_amount }
-    );
 
     if (token_data.maximum > 0) {
-        token_data.supply = token_data.supply - burned_amount;
+        token_data.supply -= burned_amount;
 
         // Delete the token_data if supply drops to 0.
         if (token_data.supply == 0) {
-            destroy_token_data(table::remove(&mut collections.token_data, token_id.token_data_id));
+            destroy_token_data(collections.token_data.remove(token_id.token_data_id));
 
             // update the collection supply
-            let collection_data = table::borrow_mut(
-                &mut collections.collection_data,
-                token_id.token_data_id.collection
-            );
+            let collection_data = collections.collection_data.borrow_mut(token_id.token_data_id.collection);
             if (collection_data.maximum > 0) {
-                collection_data.supply = collection_data.supply - 1;
+                collection_data.supply -= 1;
                 // delete the collection data if the collection supply equals 0
                 if (collection_data.supply == 0) {
-                    destroy_collection_data(table::remove(&mut collections.collection_data, collection_data.name));
+                    destroy_collection_data(collections.collection_data.remove(collection_data.name));
                 };
             };
         };
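
The hunks in this file apply two mechanical rewrites throughout `token.move`: `borrow_global_mut<R>(addr)` becomes the resource index expression `R[addr]` (with `&`/`&mut` taken at the use site), and free-function table calls such as `table::borrow_mut(&mut t, k)` become receiver-style calls `t.borrow_mut(k)`. A minimal sketch of the pattern, using a hypothetical `Counters` resource rather than the real `TokenStore`/`Collections` types:

```move
module 0x42::index_syntax_example {
    use aptos_std::table::{Self, Table};

    /// Hypothetical resource, standing in for TokenStore/Collections.
    struct Counters has key {
        values: Table<u64, u64>,
    }

    // Legacy style: explicit borrow_global_mut plus table:: free functions.
    fun bump_legacy(addr: address, key: u64) acquires Counters {
        let counters = borrow_global_mut<Counters>(addr);
        let value = table::borrow_mut(&mut counters.values, key);
        *value = *value + 1;
    }

    // Rewritten style: resource index notation, receiver calls, compound assignment.
    fun bump(addr: address, key: u64) acquires Counters {
        let counters = &mut Counters[addr];
        let value = counters.values.borrow_mut(key);
        *value += 1;
    }
}
```

Both spellings denote the same storage and table operations; the index and receiver forms are syntactic sugar that only requires the newer compiler front end (`move-compiler-v2` in the dependency changes above).
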
@@ -2318,62 +2643,54 @@ Burn a token by the token owner
         error::not_found(ECOLLECTIONS_NOT_PUBLISHED),
     );
 
-    let collections = borrow_global_mut<Collections>(creator_addr);
+    let collections = &mut Collections[creator_addr];
     assert!(
-        table::contains(&collections.token_data, token_id.token_data_id),
+        collections.token_data.contains(token_id.token_data_id),
         error::not_found(ETOKEN_DATA_NOT_PUBLISHED),
     );
 
-    let token_data = table::borrow_mut(
-        &mut collections.token_data,
-        token_id.token_data_id,
-    );
+    let token_data = collections.token_data.borrow_mut(token_id.token_data_id);
 
     assert!(
-        property_map::contains_key(&token_data.default_properties, &string::utf8(BURNABLE_BY_OWNER)),
+        token_data.default_properties.contains_key(&string::utf8(BURNABLE_BY_OWNER)),
         error::permission_denied(EOWNER_CANNOT_BURN_TOKEN)
     );
-    let burn_by_owner_flag = property_map::read_bool(&token_data.default_properties, &string::utf8(BURNABLE_BY_OWNER));
+    let burn_by_owner_flag = token_data.default_properties.read_bool(&string::utf8(BURNABLE_BY_OWNER));
     assert!(burn_by_owner_flag, error::permission_denied(EOWNER_CANNOT_BURN_TOKEN));
 
-    // Burn the tokens.
+    // Burn the tokens.
     let Token { id: _, amount: burned_amount, token_properties: _ } = withdraw_token(owner, token_id, amount);
-    let token_store = borrow_global_mut<TokenStore>(signer::address_of(owner));
+    let token_store = &mut TokenStore[signer::address_of(owner)];
     if (std::features::module_event_migration_enabled()) {
-        event::emit(BurnToken { id: token_id, amount: burned_amount });
+        event::emit(Burn { account: signer::address_of(owner), id: token_id, amount: burned_amount });
+    } else {
+        event::emit_event<BurnTokenEvent>(
+            &mut token_store.burn_events,
+            BurnTokenEvent { id: token_id, amount: burned_amount }
+        );
     };
-    event::emit_event<BurnTokenEvent>(
-        &mut token_store.burn_events,
-        BurnTokenEvent { id: token_id, amount: burned_amount }
-    );
 
     // Decrease the supply correspondingly by the amount of tokens burned.
-    let token_data = table::borrow_mut(
-        &mut collections.token_data,
-        token_id.token_data_id,
-    );
+    let token_data = collections.token_data.borrow_mut(token_id.token_data_id);
 
     // only update the supply if we tracking the supply and maximal
     // maximal == 0 is reserved for unlimited token and collection with no tracking info.
     if (token_data.maximum > 0) {
-        token_data.supply = token_data.supply - burned_amount;
+        token_data.supply -= burned_amount;
 
         // Delete the token_data if supply drops to 0.
         if (token_data.supply == 0) {
-            destroy_token_data(table::remove(&mut collections.token_data, token_id.token_data_id));
+            destroy_token_data(collections.token_data.remove(token_id.token_data_id));
 
             // update the collection supply
-            let collection_data = table::borrow_mut(
-                &mut collections.collection_data,
-                token_id.token_data_id.collection
-            );
+            let collection_data = collections.collection_data.borrow_mut(token_id.token_data_id.collection);
 
             // only update and check the supply for unlimited collection
             if (collection_data.maximum > 0){
-                collection_data.supply = collection_data.supply - 1;
+                collection_data.supply -= 1;
                 // delete the collection data if the collection supply equals 0
                 if (collection_data.supply == 0) {
-                    destroy_collection_data(table::remove(&mut collections.collection_data, collection_data.name));
+                    destroy_collection_data(collections.collection_data.remove(collection_data.name));
                 };
             };
         };
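
The burn hunks above also change how events are emitted: instead of unconditionally emitting the legacy `EventHandle`-based event after the feature-gated module event, the handle-based emission moves into an `else` branch, so exactly one of the two is emitted depending on `module_event_migration_enabled()`. A minimal sketch of that gating pattern, with hypothetical `Ping`/`PingEvent` types standing in for `Burn`/`BurnTokenEvent`:

```move
module 0x42::event_migration_example {
    use std::signer;
    use aptos_framework::event::{Self, EventHandle};

    /// Hypothetical module event, carrying the account explicitly.
    #[event]
    struct Ping has drop, store {
        account: address,
        value: u64,
    }

    /// Hypothetical legacy handle-based event.
    struct PingEvent has drop, store {
        value: u64,
    }

    /// Hypothetical store holding the legacy event handle.
    struct PingStore has key {
        ping_events: EventHandle<PingEvent>,
    }

    fun emit_ping(account: &signer, value: u64) acquires PingStore {
        let addr = signer::address_of(account);
        if (std::features::module_event_migration_enabled()) {
            // New path: emit the module event only.
            event::emit(Ping { account: addr, value });
        } else {
            // Old path: emit through the handle stored under the account.
            event::emit_event<PingEvent>(
                &mut PingStore[addr].ping_events,
                PingEvent { value },
            );
        };
    }
}
```

The same gating appears at every emit site in this diff (create, mint, deposit, withdraw, burn, mutate).
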
@@ -2403,7 +2720,9 @@ Burn a token by the token owner
 
public fun mutate_collection_description(creator: &signer, collection_name: String, description: String) acquires Collections {
     let creator_address = signer::address_of(creator);
     assert_collection_exists(creator_address, collection_name);
-    let collection_data = table::borrow_mut(&mut borrow_global_mut<Collections>(creator_address).collection_data, collection_name);
+    let collection_data = Collections[creator_address].collection_data.borrow_mut(
+        collection_name
+    );
     assert!(collection_data.mutability_config.description, error::permission_denied(EFIELD_NOT_MUTABLE));
     token_event_store::emit_collection_description_mutate_event(creator, collection_name, collection_data.description, description);
     collection_data.description = description;
@@ -2430,10 +2749,12 @@ Burn a token by the token owner
 
 
 
public fun mutate_collection_uri(creator: &signer, collection_name: String, uri: String) acquires Collections {
-    assert!(string::length(&uri) <= MAX_URI_LENGTH, error::invalid_argument(EURI_TOO_LONG));
+    assert!(uri.length() <= MAX_URI_LENGTH, error::invalid_argument(EURI_TOO_LONG));
     let creator_address = signer::address_of(creator);
     assert_collection_exists(creator_address, collection_name);
-    let collection_data = table::borrow_mut(&mut borrow_global_mut<Collections>(creator_address).collection_data, collection_name);
+    let collection_data = Collections[creator_address].collection_data.borrow_mut(
+        collection_name
+    );
     assert!(collection_data.mutability_config.uri, error::permission_denied(EFIELD_NOT_MUTABLE));
     token_event_store::emit_collection_uri_mutate_event(creator, collection_name, collection_data.uri , uri);
     collection_data.uri = uri;
@@ -2462,7 +2783,9 @@ Burn a token by the token owner
 
public fun mutate_collection_maximum(creator: &signer, collection_name: String, maximum: u64) acquires Collections {
     let creator_address = signer::address_of(creator);
     assert_collection_exists(creator_address, collection_name);
-    let collection_data = table::borrow_mut(&mut borrow_global_mut<Collections>(creator_address).collection_data, collection_name);
+    let collection_data = Collections[creator_address].collection_data.borrow_mut(
+        collection_name
+    );
     // cannot change maximum from 0 and cannot change maximum to 0
     assert!(collection_data.maximum != 0 && maximum != 0, error::invalid_argument(EINVALID_MAXIMUM));
     assert!(maximum >= collection_data.supply, error::invalid_argument(EINVALID_MAXIMUM));
@@ -2493,8 +2816,8 @@ Burn a token by the token owner
 
 
public fun mutate_tokendata_maximum(creator: &signer, token_data_id: TokenDataId, maximum: u64) acquires Collections {
     assert_tokendata_exists(creator, token_data_id);
-    let all_token_data = &mut borrow_global_mut<Collections>(token_data_id.creator).token_data;
-    let token_data = table::borrow_mut(all_token_data, token_data_id);
+    let all_token_data = &mut Collections[token_data_id.creator].token_data;
+    let token_data = all_token_data.borrow_mut(token_data_id);
     // cannot change maximum from 0 and cannot change maximum to 0
     assert!(token_data.maximum != 0 && maximum != 0, error::invalid_argument(EINVALID_MAXIMUM));
     assert!(maximum >= token_data.supply, error::invalid_argument(EINVALID_MAXIMUM));
@@ -2528,11 +2851,11 @@ Burn a token by the token owner
     token_data_id: TokenDataId,
     uri: String
 ) acquires Collections {
-    assert!(string::length(&uri) <= MAX_URI_LENGTH, error::invalid_argument(EURI_TOO_LONG));
+    assert!(uri.length() <= MAX_URI_LENGTH, error::invalid_argument(EURI_TOO_LONG));
     assert_tokendata_exists(creator, token_data_id);
 
-    let all_token_data = &mut borrow_global_mut<Collections>(token_data_id.creator).token_data;
-    let token_data = table::borrow_mut(all_token_data, token_data_id);
+    let all_token_data = &mut Collections[token_data_id.creator].token_data;
+    let token_data = all_token_data.borrow_mut(token_data_id);
     assert!(token_data.mutability_config.uri, error::permission_denied(EFIELD_NOT_MUTABLE));
     token_event_store::emit_token_uri_mutate_event(creator, token_data_id.collection, token_data_id.name, token_data.uri ,uri);
     token_data.uri = uri;
@@ -2561,8 +2884,8 @@ Burn a token by the token owner
 
public fun mutate_tokendata_royalty(creator: &signer, token_data_id: TokenDataId, royalty: Royalty) acquires Collections {
     assert_tokendata_exists(creator, token_data_id);
 
-    let all_token_data = &mut borrow_global_mut<Collections>(token_data_id.creator).token_data;
-    let token_data = table::borrow_mut(all_token_data, token_data_id);
+    let all_token_data = &mut Collections[token_data_id.creator].token_data;
+    let token_data = all_token_data.borrow_mut(token_data_id);
     assert!(token_data.mutability_config.royalty, error::permission_denied(EFIELD_NOT_MUTABLE));
 
     token_event_store::emit_token_royalty_mutate_event(
@@ -2602,8 +2925,8 @@ Burn a token by the token owner
 
public fun mutate_tokendata_description(creator: &signer, token_data_id: TokenDataId, description: String) acquires Collections {
     assert_tokendata_exists(creator, token_data_id);
 
-    let all_token_data = &mut borrow_global_mut<Collections>(token_data_id.creator).token_data;
-    let token_data = table::borrow_mut(all_token_data, token_data_id);
+    let all_token_data = &mut Collections[token_data_id.creator].token_data;
+    let token_data = all_token_data.borrow_mut(token_data_id);
     assert!(token_data.mutability_config.description, error::permission_denied(EFIELD_NOT_MUTABLE));
     token_event_store::emit_token_descrition_mutate_event(creator, token_data_id.collection, token_data_id.name, token_data.description, description);
     token_data.description = description;
@@ -2638,35 +2961,33 @@ Allow creator to mutate the default properties in TokenData
     types: vector<String>,
 ) acquires Collections {
     assert_tokendata_exists(creator, token_data_id);
-    let key_len = vector::length(&keys);
-    let val_len = vector::length(&values);
-    let typ_len = vector::length(&types);
+    let key_len = keys.length();
+    let val_len = values.length();
+    let typ_len = types.length();
     assert!(key_len == val_len, error::invalid_state(ETOKEN_PROPERTIES_COUNT_NOT_MATCH));
     assert!(key_len == typ_len, error::invalid_state(ETOKEN_PROPERTIES_COUNT_NOT_MATCH));
 
-    let all_token_data = &mut borrow_global_mut<Collections>(token_data_id.creator).token_data;
-    let token_data = table::borrow_mut(all_token_data, token_data_id);
+    let all_token_data = &mut Collections[token_data_id.creator].token_data;
+    let token_data = all_token_data.borrow_mut(token_data_id);
     assert!(token_data.mutability_config.properties, error::permission_denied(EFIELD_NOT_MUTABLE));
-    let i: u64 = 0;
     let old_values: vector<Option<PropertyValue>> = vector::empty();
     let new_values: vector<PropertyValue> = vector::empty();
     assert_non_standard_reserved_property(&keys);
-    while (i < vector::length(&keys)){
-        let key = vector::borrow(&keys, i);
-        let old_pv = if (property_map::contains_key(&token_data.default_properties, key)) {
-            option::some(*property_map::borrow(&token_data.default_properties, key))
+    for (i in 0..keys.length()){
+        let key = keys.borrow(i);
+        let old_pv = if (token_data.default_properties.contains_key(key)) {
+            option::some(*token_data.default_properties.borrow(key))
         } else {
             option::none<PropertyValue>()
         };
-        vector::push_back(&mut old_values, old_pv);
-        let new_pv = property_map::create_property_value_raw(*vector::borrow(&values, i), *vector::borrow(&types, i));
-        vector::push_back(&mut new_values, new_pv);
-        if (option::is_some(&old_pv)) {
-            property_map::update_property_value(&mut token_data.default_properties, key, new_pv);
+        old_values.push_back(old_pv);
+        let new_pv = property_map::create_property_value_raw(values[i], types[i]);
+        new_values.push_back(new_pv);
+        if (old_pv.is_some()) {
+            token_data.default_properties.update_property_value(key, new_pv);
         } else {
-            property_map::add(&mut token_data.default_properties, *key, new_pv);
+            token_data.default_properties.add(*key, new_pv);
         };
-        i = i + 1;
     };
     token_event_store::emit_default_property_mutate_event(creator, token_data_id.collection, token_data_id.name, keys, old_values, new_values);
 }
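
The property-mutation hunk above also replaces the manual index-counter `while` loop with a range-based `for` loop, together with vector receiver calls (`keys.borrow(i)`, `old_values.push_back(..)`) and index reads (`values[i]`). A minimal sketch of the same loop rewrite on a hypothetical summing function:

```move
module 0x42::loop_rewrite_example {
    use std::vector;

    /// Hypothetical helper: sum with the legacy index-counter loop.
    fun sum_legacy(values: &vector<u64>): u64 {
        let total = 0;
        let i = 0;
        while (i < vector::length(values)) {
            total = total + *vector::borrow(values, i);
            i = i + 1;
        };
        total
    }

    /// The same function after the rewrite: range for loop, receiver call, index read.
    fun sum(values: &vector<u64>): u64 {
        let total = 0;
        for (i in 0..values.length()) {
            total += values[i];
        };
        total
    }
}
```
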
@@ -2704,22 +3025,22 @@ Mutate the token_properties of one token.
     assert!(signer::address_of(account) == creator, error::permission_denied(ENO_MUTATE_CAPABILITY));
     // validate if the properties is mutable
     assert!(exists<Collections>(creator), error::not_found(ECOLLECTIONS_NOT_PUBLISHED));
-    let all_token_data = &mut borrow_global_mut<Collections>(
+    let all_token_data = &mut Collections[
         creator
-    ).token_data;
+    ].token_data;
 
-    assert!(table::contains(all_token_data, token_id.token_data_id), error::not_found(ETOKEN_DATA_NOT_PUBLISHED));
-    let token_data = table::borrow_mut(all_token_data, token_id.token_data_id);
+    assert!(all_token_data.contains(token_id.token_data_id), error::not_found(ETOKEN_DATA_NOT_PUBLISHED));
+    let token_data = all_token_data.borrow_mut(token_id.token_data_id);
 
-    // if default property is mutatable, token property is alwasy mutable
+    // if default property is mutatable, token property is always mutable
     // we only need to check TOKEN_PROPERTY_MUTABLE when default property is immutable
     if (!token_data.mutability_config.properties) {
         assert!(
-            property_map::contains_key(&token_data.default_properties, &string::utf8(TOKEN_PROPERTY_MUTABLE)),
+            token_data.default_properties.contains_key(&string::utf8(TOKEN_PROPERTY_MUTABLE)),
             error::permission_denied(EFIELD_NOT_MUTABLE)
         );
 
-        let token_prop_mutable = property_map::read_bool(&token_data.default_properties, &string::utf8(TOKEN_PROPERTY_MUTABLE));
+        let token_prop_mutable = token_data.default_properties.read_bool(&string::utf8(TOKEN_PROPERTY_MUTABLE));
         assert!(token_prop_mutable, error::permission_denied(EFIELD_NOT_MUTABLE));
     };
 
@@ -2737,24 +3058,26 @@ Mutate the token_properties of one token.
         direct_deposit(token_owner, new_token);
         update_token_property_internal(token_owner, new_token_id, keys, values, types);
         if (std::features::module_event_migration_enabled()) {
-            event::emit(MutateTokenPropertyMap {
+            event::emit(MutatePropertyMap {
+                account: token_owner,
                 old_id: token_id,
                 new_id: new_token_id,
                 keys,
                 values,
                 types
             });
+        } else {
+            event::emit_event<MutateTokenPropertyMapEvent>(
+                &mut TokenStore[token_owner].mutate_token_property_events,
+                MutateTokenPropertyMapEvent {
+                    old_id: token_id,
+                    new_id: new_token_id,
+                    keys,
+                    values,
+                    types
+                },
+            );
         };
-        event::emit_event<MutateTokenPropertyMapEvent>(
-            &mut borrow_global_mut<TokenStore>(token_owner).mutate_token_property_events,
-            MutateTokenPropertyMapEvent {
-                old_id: token_id,
-                new_id: new_token_id,
-                keys,
-                values,
-                types
-            },
-        );
 
         token_data.largest_property_version = cur_property_version;
         // burn the orignial property_version 0 token after mutation
@@ -2764,24 +3087,26 @@ Mutate the token_properties of one token.
         // only 1 copy for the token with property verion bigger than 0
         update_token_property_internal(token_owner, token_id, keys, values, types);
         if (std::features::module_event_migration_enabled()) {
-            event::emit(MutateTokenPropertyMap {
+            event::emit(MutatePropertyMap {
+                account: token_owner,
                 old_id: token_id,
                 new_id: token_id,
                 keys,
                 values,
                 types
             });
+        } else {
+            event::emit_event<MutateTokenPropertyMapEvent>(
+                &mut TokenStore[token_owner].mutate_token_property_events,
+                MutateTokenPropertyMapEvent {
+                    old_id: token_id,
+                    new_id: token_id,
+                    keys,
+                    values,
+                    types
+                },
+            );
         };
-        event::emit_event<MutateTokenPropertyMapEvent>(
-            &mut borrow_global_mut<TokenStore>(token_owner).mutate_token_property_events,
-            MutateTokenPropertyMapEvent {
-                old_id: token_id,
-                new_id: token_id,
-                keys,
-                values,
-                types
-            },
-        );
         token_id
     }
 }
@@ -2808,7 +3133,8 @@ Mutate the token_properties of one token.
 
 
public fun create_royalty(royalty_points_numerator: u64, royalty_points_denominator: u64, payee_address: address): Royalty {
     assert!(royalty_points_numerator <= royalty_points_denominator, error::invalid_argument(EINVALID_ROYALTY_NUMERATOR_DENOMINATOR));
-    assert!(account::exists_at(payee_address), error::invalid_argument(EROYALTY_PAYEE_ACCOUNT_DOES_NOT_EXIST));
+    // Question[Orderless]: Is it okay to remove this check to accommodate stateless accounts?
+    // assert!(account::exists_at(payee_address), error::invalid_argument(EROYALTY_PAYEE_ACCOUNT_DOES_NOT_EXIST));
     Royalty {
         royalty_points_numerator,
         royalty_points_denominator,
@@ -2865,7 +3191,7 @@ direct deposit if user opt in direct transfer
 
 
 
public fun direct_deposit_with_opt_in(account_addr: address, token: Token) acquires TokenStore {
-    let opt_in_transfer = borrow_global<TokenStore>(account_addr).direct_transfer;
+    let opt_in_transfer = TokenStore[account_addr].direct_transfer;
     assert!(opt_in_transfer, error::permission_denied(EUSER_NOT_OPT_IN_DIRECT_TRANSFER));
     direct_deposit(account_addr, token);
 }
@@ -2958,7 +3284,7 @@ direct deposit if user opt in direct transfer
 
 
public fun merge(dst_token: &mut Token, source_token: Token) {
     assert!(&dst_token.id == &source_token.id, error::invalid_argument(EINVALID_TOKEN_MERGE));
-    dst_token.amount = dst_token.amount + source_token.amount;
+    dst_token.amount += source_token.amount;
     let Token { id: _, amount: _, token_properties: _ } = source_token;
 }
 
@@ -2986,7 +3312,7 @@ direct deposit if user opt in direct transfer
     assert!(dst_token.id.property_version == 0, error::invalid_state(ENFT_NOT_SPLITABLE));
     assert!(dst_token.amount > amount, error::invalid_argument(ETOKEN_SPLIT_AMOUNT_LARGER_OR_EQUAL_TO_TOKEN_AMOUNT));
     assert!(amount > 0, error::invalid_argument(ETOKEN_CANNOT_HAVE_ZERO_AMOUNT));
-    dst_token.amount = dst_token.amount - amount;
+    dst_token.amount -= amount;
     Token {
         id: dst_token.id,
         amount,
@@ -3045,7 +3371,7 @@ Transfers amount of tokens from from to to
     to: address,
     amount: u64,
 ) acquires TokenStore {
-    let opt_in_transfer = borrow_global<TokenStore>(to).direct_transfer;
+    let opt_in_transfer = TokenStore[to].direct_transfer;
     assert!(opt_in_transfer, error::permission_denied(EUSER_NOT_OPT_IN_DIRECT_TRANSFER));
     let token = withdraw_token(from, id, amount);
     direct_deposit(to, token);
@@ -3232,8 +3558,8 @@ Create a new collection to hold tokens
     maximum: u64,
     mutate_setting: vector<bool>
 ) acquires Collections {
-    assert!(string::length(&name) <= MAX_COLLECTION_NAME_LENGTH, error::invalid_argument(ECOLLECTION_NAME_TOO_LONG));
-    assert!(string::length(&uri) <= MAX_URI_LENGTH, error::invalid_argument(EURI_TOO_LONG));
+    assert!(name.length() <= MAX_COLLECTION_NAME_LENGTH, error::invalid_argument(ECOLLECTION_NAME_TOO_LONG));
+    assert!(uri.length() <= MAX_URI_LENGTH, error::invalid_argument(EURI_TOO_LONG));
     let account_addr = signer::address_of(creator);
     if (!exists<Collections>(account_addr)) {
         move_to(
@@ -3248,25 +3574,25 @@ Create a new collection to hold tokens
         )
     };
-    let collection_data = &mut borrow_global_mut<Collections>(account_addr).collection_data;
+    let collection_data = &mut Collections[account_addr].collection_data;
     assert!(
-        !table::contains(collection_data, name),
+        !collection_data.contains(name),
         error::already_exists(ECOLLECTION_ALREADY_EXISTS),
     );
     let mutability_config = create_collection_mutability_config(&mutate_setting);
     let collection = CollectionData {
         description,
-        name: name,
+        name,
         uri,
         supply: 0,
         maximum,
         mutability_config
     };
-    table::add(collection_data, name, collection);
-    let collection_handle = borrow_global_mut<Collections>(account_addr);
+    collection_data.add(name, collection);
+    let collection_handle = &mut Collections[account_addr];
     if (std::features::module_event_migration_enabled()) {
         event::emit(
             CreateCollection {
@@ -3277,17 +3603,18 @@ Create a new collection to hold tokens
                 maximum,
             }
         );
+    } else {
+        event::emit_event<CreateCollectionEvent>(
+            &mut collection_handle.create_collection_events,
+            CreateCollectionEvent {
+                creator: account_addr,
+                collection_name: name,
+                uri,
+                description,
+                maximum,
+            }
+        );
     };
-    event::emit_event<CreateCollectionEvent>(
-        &mut collection_handle.create_collection_events,
-        CreateCollectionEvent {
-            creator: account_addr,
-            collection_name: name,
-            uri,
-            description,
-            maximum,
-        }
-    );
 }
@@ -3316,8 +3643,8 @@ Create a new collection to hold tokens
         error::not_found(ECOLLECTIONS_NOT_PUBLISHED),
     );
-    let collection_data = &borrow_global<Collections>(creator).collection_data;
-    table::contains(collection_data, name)
+    let collection_data = &Collections[creator].collection_data;
+    collection_data.contains(name)
 }
@@ -3346,9 +3673,9 @@ Create a new collection to hold tokens
         error::not_found(ECOLLECTIONS_NOT_PUBLISHED),
     );
-    let token_data = &borrow_global<Collections>(creator).token_data;
+    let token_data = &Collections[creator].token_data;
     let token_data_id = create_token_data_id(creator, collection_name, token_name);
-    table::contains(token_data, token_data_id)
+    token_data.contains(token_data_id)
 }
@@ -3386,9 +3713,9 @@ Create a new collection to hold tokens
     property_values: vector<vector<u8>>,
     property_types: vector<String>
 ): TokenDataId acquires Collections {
-    assert!(string::length(&name) <= MAX_NFT_NAME_LENGTH, error::invalid_argument(ENFT_NAME_TOO_LONG));
-    assert!(string::length(&collection) <= MAX_COLLECTION_NAME_LENGTH, error::invalid_argument(ECOLLECTION_NAME_TOO_LONG));
-    assert!(string::length(&uri) <= MAX_URI_LENGTH, error::invalid_argument(EURI_TOO_LONG));
+    assert!(name.length() <= MAX_NFT_NAME_LENGTH, error::invalid_argument(ENFT_NAME_TOO_LONG));
+    assert!(collection.length() <= MAX_COLLECTION_NAME_LENGTH, error::invalid_argument(ECOLLECTION_NAME_TOO_LONG));
+    assert!(uri.length() <= MAX_URI_LENGTH, error::invalid_argument(EURI_TOO_LONG));
     assert!(royalty_points_numerator <= royalty_points_denominator, error::invalid_argument(EINVALID_ROYALTY_NUMERATOR_DENOMINATOR));

     let account_addr = signer::address_of(account);
@@ -3396,25 +3723,25 @@ Create a new collection to hold tokens
         exists<Collections>(account_addr),
         error::not_found(ECOLLECTIONS_NOT_PUBLISHED),
     );
-    let collections = borrow_global_mut<Collections>(account_addr);
+    let collections = &mut Collections[account_addr];
     let token_data_id = create_token_data_id(account_addr, collection, name);
     assert!(
-        table::contains(&collections.collection_data, token_data_id.collection),
+        collections.collection_data.contains(token_data_id.collection),
         error::not_found(ECOLLECTION_NOT_PUBLISHED),
     );
     assert!(
-        !table::contains(&collections.token_data, token_data_id),
+        !collections.token_data.contains(token_data_id),
         error::already_exists(ETOKEN_DATA_ALREADY_EXISTS),
     );
-    let collection = table::borrow_mut(&mut collections.collection_data, token_data_id.collection);
+    let collection = collections.collection_data.borrow_mut(token_data_id.collection);
     // if collection maximum == 0, user don't want to enforce supply constraint.
     // we don't track supply to make token creation parallelizable
     if (collection.maximum > 0) {
-        collection.supply = collection.supply + 1;
+        collection.supply += 1;
         assert!(
             collection.maximum >= collection.supply,
             error::invalid_argument(ECREATE_WOULD_EXCEED_COLLECTION_MAXIMUM),
@@ -3433,10 +3760,11 @@ Create a new collection to hold tokens
         mutability_config: token_mutate_config,
     };
-    table::add(&mut collections.token_data, token_data_id, token_data);
+    collections.token_data.add(token_data_id, token_data);
     if (std::features::module_event_migration_enabled()) {
         event::emit(
-            CreateTokenData {
+            TokenDataCreation {
+                creator: account_addr,
                 id: token_data_id,
                 description,
                 maximum,
@@ -3451,25 +3779,26 @@ Create a new collection to hold tokens
                 property_types,
             }
         );
+    } else {
+        event::emit_event<CreateTokenDataEvent>(
+            &mut collections.create_token_data_events,
+            CreateTokenDataEvent {
+                id: token_data_id,
+                description,
+                maximum,
+                uri,
+                royalty_payee_address,
+                royalty_points_denominator,
+                royalty_points_numerator,
+                name,
+                mutability_config: token_mutate_config,
+                property_keys,
+                property_values,
+                property_types,
+            },
+        );
     };
-    event::emit_event<CreateTokenDataEvent>(
-        &mut collections.create_token_data_events,
-        CreateTokenDataEvent {
-            id: token_data_id,
-            description,
-            maximum,
-            uri,
-            royalty_payee_address,
-            royalty_points_denominator,
-            royalty_points_numerator,
-            name,
-            mutability_config: token_mutate_config,
-            property_keys,
-            property_values,
-            property_types,
-        },
-    );
     token_data_id
 }
@@ -3496,7 +3825,9 @@ return the number of distinct token_data_id created under this collection
public fun get_collection_supply(creator_address: address, collection_name: String): Option<u64> acquires Collections {
     assert_collection_exists(creator_address, collection_name);
-    let collection_data = table::borrow_mut(&mut borrow_global_mut<Collections>(creator_address).collection_data, collection_name);
+    let collection_data = Collections[creator_address].collection_data.borrow_mut(
+        collection_name
+    );
 
     if (collection_data.maximum > 0) {
         option::some(collection_data.supply)
@@ -3527,7 +3858,9 @@ return the number of distinct token_data_id created under this collection
 
 
public fun get_collection_description(creator_address: address, collection_name: String): String acquires Collections {
     assert_collection_exists(creator_address, collection_name);
-    let collection_data = table::borrow_mut(&mut borrow_global_mut<Collections>(creator_address).collection_data, collection_name);
+    let collection_data = Collections[creator_address].collection_data.borrow_mut(
+        collection_name
+    );
     collection_data.description
 }
 
@@ -3553,7 +3886,9 @@ return the number of distinct token_data_id created under this collection
public fun get_collection_uri(creator_address: address, collection_name: String): String acquires Collections {
     assert_collection_exists(creator_address, collection_name);
-    let collection_data = table::borrow_mut(&mut borrow_global_mut<Collections>(creator_address).collection_data, collection_name);
+    let collection_data = Collections[creator_address].collection_data.borrow_mut(
+        collection_name
+    );
     collection_data.uri
 }
 
@@ -3579,7 +3914,9 @@ return the number of distinct token_data_id created under this collection
public fun get_collection_maximum(creator_address: address, collection_name: String): u64 acquires Collections {
     assert_collection_exists(creator_address, collection_name);
-    let collection_data = table::borrow_mut(&mut borrow_global_mut<Collections>(creator_address).collection_data, collection_name);
+    let collection_data = Collections[creator_address].collection_data.borrow_mut(
+        collection_name
+    );
     collection_data.maximum
 }
 
@@ -3606,9 +3943,9 @@ return the number of distinct token_id created under this TokenData
public fun get_token_supply(creator_address: address, token_data_id: TokenDataId): Option<u64> acquires Collections {
     assert!(exists<Collections>(creator_address), error::not_found(ECOLLECTIONS_NOT_PUBLISHED));
-    let all_token_data = &borrow_global<Collections>(creator_address).token_data;
-    assert!(table::contains(all_token_data, token_data_id), error::not_found(ETOKEN_DATA_NOT_PUBLISHED));
-    let token_data = table::borrow(all_token_data, token_data_id);
+    let all_token_data = &Collections[creator_address].token_data;
+    assert!(all_token_data.contains(token_data_id), error::not_found(ETOKEN_DATA_NOT_PUBLISHED));
+    let token_data = all_token_data.borrow(token_data_id);
 
     if (token_data.maximum > 0) {
         option::some(token_data.supply)
@@ -3640,9 +3977,9 @@ return the largest_property_version of this TokenData
 
 
public fun get_tokendata_largest_property_version(creator_address: address, token_data_id: TokenDataId): u64 acquires Collections {
     assert!(exists<Collections>(creator_address), error::not_found(ECOLLECTIONS_NOT_PUBLISHED));
-    let all_token_data = &borrow_global<Collections>(creator_address).token_data;
-    assert!(table::contains(all_token_data, token_data_id), error::not_found(ETOKEN_DATA_NOT_PUBLISHED));
-    table::borrow(all_token_data, token_data_id).largest_property_version
+    let all_token_data = &Collections[creator_address].token_data;
+    assert!(all_token_data.contains(token_data_id), error::not_found(ETOKEN_DATA_NOT_PUBLISHED));
+    all_token_data.borrow(token_data_id).largest_property_version
 }
 
@@ -3695,7 +4032,7 @@ return the TokenId for a given Token
         return false
     };
-    borrow_global<TokenStore>(receiver).direct_transfer
+    TokenStore[receiver].direct_transfer
 }
@@ -3720,11 +4057,11 @@ return the TokenId for a given Token
public fun create_token_mutability_config(mutate_setting: &vector<bool>): TokenMutabilityConfig {
     TokenMutabilityConfig {
-        maximum: *vector::borrow(mutate_setting, TOKEN_MAX_MUTABLE_IND),
-        uri: *vector::borrow(mutate_setting, TOKEN_URI_MUTABLE_IND),
-        royalty: *vector::borrow(mutate_setting, TOKEN_ROYALTY_MUTABLE_IND),
-        description: *vector::borrow(mutate_setting, TOKEN_DESCRIPTION_MUTABLE_IND),
-        properties: *vector::borrow(mutate_setting, TOKEN_PROPERTY_MUTABLE_IND),
+        maximum: mutate_setting[TOKEN_MAX_MUTABLE_IND],
+        uri: mutate_setting[TOKEN_URI_MUTABLE_IND],
+        royalty: mutate_setting[TOKEN_ROYALTY_MUTABLE_IND],
+        description: mutate_setting[TOKEN_DESCRIPTION_MUTABLE_IND],
+        properties: mutate_setting[TOKEN_PROPERTY_MUTABLE_IND],
     }
 }
 
@@ -3750,9 +4087,9 @@ return the TokenId for a given Token
public fun create_collection_mutability_config(mutate_setting: &vector<bool>): CollectionMutabilityConfig {
     CollectionMutabilityConfig {
-        description: *vector::borrow(mutate_setting, COLLECTION_DESCRIPTION_MUTABLE_IND),
-        uri: *vector::borrow(mutate_setting, COLLECTION_URI_MUTABLE_IND),
-        maximum: *vector::borrow(mutate_setting, COLLECTION_MAX_MUTABLE_IND),
+        description: mutate_setting[COLLECTION_DESCRIPTION_MUTABLE_IND],
+        uri: mutate_setting[COLLECTION_URI_MUTABLE_IND],
+        maximum: mutate_setting[COLLECTION_MAX_MUTABLE_IND],
     }
 }
 
@@ -3783,27 +4120,28 @@ return the TokenId for a given Token
 ): TokenId acquires Collections, TokenStore {
     assert!(token_data_id.creator == signer::address_of(account), error::permission_denied(ENO_MINT_CAPABILITY));
     let creator_addr = token_data_id.creator;
-    let all_token_data = &mut borrow_global_mut<Collections>(creator_addr).token_data;
-    assert!(table::contains(all_token_data, token_data_id), error::not_found(ETOKEN_DATA_NOT_PUBLISHED));
-    let token_data = table::borrow_mut(all_token_data, token_data_id);
+    let all_token_data = &mut Collections[creator_addr].token_data;
+    assert!(all_token_data.contains(token_data_id), error::not_found(ETOKEN_DATA_NOT_PUBLISHED));
+    let token_data = all_token_data.borrow_mut(token_data_id);
     if (token_data.maximum > 0) {
         assert!(token_data.supply + amount <= token_data.maximum, error::invalid_argument(EMINT_WOULD_EXCEED_TOKEN_MAXIMUM));
-        token_data.supply = token_data.supply + amount;
+        token_data.supply += amount;
     };
     // we add more tokens with property_version 0
     let token_id = create_token_id(token_data_id, 0);
     if (std::features::module_event_migration_enabled()) {
-        event::emit(MintToken { id: token_data_id, amount })
+        event::emit(Mint { creator: creator_addr, id: token_data_id, amount })
+    } else {
+        event::emit_event<MintTokenEvent>(
+            &mut Collections[creator_addr].mint_token_events,
+            MintTokenEvent {
+                id: token_data_id,
+                amount,
+            }
+        );
     };
-    event::emit_event<MintTokenEvent>(
-        &mut borrow_global_mut<Collections>(creator_addr).mint_token_events,
-        MintTokenEvent {
-            id: token_data_id,
-            amount,
-        }
-    );
     deposit_token(account,
         Token {
@@ -3844,33 +4182,34 @@ create tokens and directly deposite to receiver's address. The receiver should o
     amount: u64,
 ) acquires Collections, TokenStore {
     assert!(exists<TokenStore>(receiver), error::not_found(ETOKEN_STORE_NOT_PUBLISHED));
-    let opt_in_transfer = borrow_global<TokenStore>(receiver).direct_transfer;
+    let opt_in_transfer = TokenStore[receiver].direct_transfer;
     assert!(opt_in_transfer, error::permission_denied(EUSER_NOT_OPT_IN_DIRECT_TRANSFER));
     assert!(token_data_id.creator == signer::address_of(account), error::permission_denied(ENO_MINT_CAPABILITY));
     let creator_addr = token_data_id.creator;
-    let all_token_data = &mut borrow_global_mut<Collections>(creator_addr).token_data;
-    assert!(table::contains(all_token_data, token_data_id), error::not_found(ETOKEN_DATA_NOT_PUBLISHED));
-    let token_data = table::borrow_mut(all_token_data, token_data_id);
+    let all_token_data = &mut Collections[creator_addr].token_data;
+    assert!(all_token_data.contains(token_data_id), error::not_found(ETOKEN_DATA_NOT_PUBLISHED));
+    let token_data = all_token_data.borrow_mut(token_data_id);
     if (token_data.maximum > 0) {
         assert!(token_data.supply + amount <= token_data.maximum, error::invalid_argument(EMINT_WOULD_EXCEED_TOKEN_MAXIMUM));
-        token_data.supply = token_data.supply + amount;
+        token_data.supply += amount;
     };
     // we add more tokens with property_version 0
     let token_id = create_token_id(token_data_id, 0);
     if (std::features::module_event_migration_enabled()) {
-        event::emit(MintToken { id: token_data_id, amount })
+        event::emit(Mint { creator: creator_addr, id: token_data_id, amount })
+    } else {
+        event::emit_event<MintTokenEvent>(
+            &mut Collections[creator_addr].mint_token_events,
+            MintTokenEvent {
+                id: token_data_id,
+                amount,
+            }
+        );
     };
-    event::emit_event<MintTokenEvent>(
-        &mut borrow_global_mut<Collections>(creator_addr).mint_token_events,
-        MintTokenEvent {
-            id: token_data_id,
-            amount,
-        }
-    );
     direct_deposit(receiver,
         Token {
@@ -3933,8 +4272,8 @@ create tokens and directly deposite to receiver's address. The receiver should o
     collection: String,
     name: String,
 ): TokenDataId {
-    assert!(string::length(&collection) <= MAX_COLLECTION_NAME_LENGTH, error::invalid_argument(ECOLLECTION_NAME_TOO_LONG));
-    assert!(string::length(&name) <= MAX_NFT_NAME_LENGTH, error::invalid_argument(ENFT_NAME_TOO_LONG));
+    assert!(collection.length() <= MAX_COLLECTION_NAME_LENGTH, error::invalid_argument(ECOLLECTION_NAME_TOO_LONG));
+    assert!(name.length() <= MAX_NFT_NAME_LENGTH, error::invalid_argument(ENFT_NAME_TOO_LONG));
     TokenDataId { creator, collection, name }
 }
@@ -3994,9 +4333,9 @@ create tokens and directly deposite to receiver's address. The receiver should o
     if (!exists<TokenStore>(owner)) {
         return 0
     };
-    let token_store = borrow_global<TokenStore>(owner);
-    if (table::contains(&token_store.tokens, id)) {
-        table::borrow(&token_store.tokens, id).amount
+    let token_store = &TokenStore[owner];
+    if (token_store.tokens.contains(id)) {
+        token_store.tokens.borrow(id).amount
     } else {
         0
     }
@@ -4233,13 +4572,13 @@ if property_version > 0, return the property value stored at owner's token store
     // if property_version = 0, return default property map
     if (token_id.property_version == 0) {
         let creator_addr = token_id.token_data_id.creator;
-        let all_token_data = &borrow_global<Collections>(creator_addr).token_data;
-        assert!(table::contains(all_token_data, token_id.token_data_id), error::not_found(ETOKEN_DATA_NOT_PUBLISHED));
-        let token_data = table::borrow(all_token_data, token_id.token_data_id);
+        let all_token_data = &Collections[creator_addr].token_data;
+        assert!(all_token_data.contains(token_id.token_data_id), error::not_found(ETOKEN_DATA_NOT_PUBLISHED));
+        let token_data = all_token_data.borrow(token_id.token_data_id);
         token_data.default_properties
     } else {
-        let tokens = &borrow_global<TokenStore>(owner).tokens;
-        table::borrow(tokens, token_id).token_properties
+        let tokens = &TokenStore[owner].tokens;
+        tokens.borrow(token_id).token_properties
     }
 }
@@ -4266,10 +4605,10 @@ if property_version > 0, return the property value stored at owner's token store
public fun get_tokendata_maximum(token_data_id: TokenDataId): u64 acquires Collections {
     let creator_address = token_data_id.creator;
     assert!(exists<Collections>(creator_address), error::not_found(ECOLLECTIONS_NOT_PUBLISHED));
-    let all_token_data = &borrow_global<Collections>(creator_address).token_data;
-    assert!(table::contains(all_token_data, token_data_id), error::not_found(ETOKEN_DATA_NOT_PUBLISHED));
+    let all_token_data = &Collections[creator_address].token_data;
+    assert!(all_token_data.contains(token_data_id), error::not_found(ETOKEN_DATA_NOT_PUBLISHED));
 
-    let token_data = table::borrow(all_token_data, token_data_id);
+    let token_data = all_token_data.borrow(token_data_id);
     token_data.maximum
 }
 
@@ -4295,10 +4634,10 @@ if property_version > 0, return the property value stored at owner's token store
public fun get_tokendata_uri(creator: address, token_data_id: TokenDataId): String acquires Collections {
     assert!(exists<Collections>(creator), error::not_found(ECOLLECTIONS_NOT_PUBLISHED));
-    let all_token_data = &borrow_global<Collections>(creator).token_data;
-    assert!(table::contains(all_token_data, token_data_id), error::not_found(ETOKEN_DATA_NOT_PUBLISHED));
+    let all_token_data = &Collections[creator].token_data;
+    assert!(all_token_data.contains(token_data_id), error::not_found(ETOKEN_DATA_NOT_PUBLISHED));
 
-    let token_data = table::borrow(all_token_data, token_data_id);
+    let token_data = all_token_data.borrow(token_data_id);
     token_data.uri
 }
 
@@ -4325,10 +4664,10 @@ if property_version > 0, return the property value stored at owner's token store
public fun get_tokendata_description(token_data_id: TokenDataId): String acquires Collections {
     let creator_address = token_data_id.creator;
     assert!(exists<Collections>(creator_address), error::not_found(ECOLLECTIONS_NOT_PUBLISHED));
-    let all_token_data = &borrow_global<Collections>(creator_address).token_data;
-    assert!(table::contains(all_token_data, token_data_id), error::not_found(ETOKEN_DATA_NOT_PUBLISHED));
+    let all_token_data = &Collections[creator_address].token_data;
+    assert!(all_token_data.contains(token_data_id), error::not_found(ETOKEN_DATA_NOT_PUBLISHED));
 
-    let token_data = table::borrow(all_token_data, token_data_id);
+    let token_data = all_token_data.borrow(token_data_id);
     token_data.description
 }
 
@@ -4355,10 +4694,10 @@ if property_version > 0, return the property value stored at owner's token store
public fun get_tokendata_royalty(token_data_id: TokenDataId): Royalty acquires Collections {
     let creator_address = token_data_id.creator;
     assert!(exists<Collections>(creator_address), error::not_found(ECOLLECTIONS_NOT_PUBLISHED));
-    let all_token_data = &borrow_global<Collections>(creator_address).token_data;
-    assert!(table::contains(all_token_data, token_data_id), error::not_found(ETOKEN_DATA_NOT_PUBLISHED));
+    let all_token_data = &Collections[creator_address].token_data;
+    assert!(all_token_data.contains(token_data_id), error::not_found(ETOKEN_DATA_NOT_PUBLISHED));
 
-    let token_data = table::borrow(all_token_data, token_data_id);
+    let token_data = all_token_data.borrow(token_data_id);
     token_data.royalty
 }
 
@@ -4411,9 +4750,9 @@ return the mutation setting of the token
public fun get_tokendata_mutability_config(token_data_id: TokenDataId): TokenMutabilityConfig acquires Collections {
     let creator_addr = token_data_id.creator;
     assert!(exists<Collections>(creator_addr), error::not_found(ECOLLECTIONS_NOT_PUBLISHED));
-    let all_token_data = &borrow_global<Collections>(creator_addr).token_data;
-    assert!(table::contains(all_token_data, token_data_id), error::not_found(ETOKEN_DATA_NOT_PUBLISHED));
-    table::borrow(all_token_data, token_data_id).mutability_config
+    let all_token_data = &Collections[creator_addr].token_data;
+    assert!(all_token_data.contains(token_data_id), error::not_found(ETOKEN_DATA_NOT_PUBLISHED));
+    all_token_data.borrow(token_data_id).mutability_config
 }
 
@@ -4568,9 +4907,9 @@ return the collection mutation setting
     collection_name: String
 ): CollectionMutabilityConfig acquires Collections {
     assert!(exists<Collections>(creator), error::not_found(ECOLLECTIONS_NOT_PUBLISHED));
-    let all_collection_data = &borrow_global<Collections>(creator).collection_data;
-    assert!(table::contains(all_collection_data, collection_name), error::not_found(ECOLLECTION_NOT_PUBLISHED));
-    table::borrow(all_collection_data, collection_name).mutability_config
+    let all_collection_data = &Collections[creator].collection_data;
+    assert!(all_collection_data.contains(collection_name), error::not_found(ECOLLECTION_NOT_PUBLISHED));
+    all_collection_data.borrow(collection_name).mutability_config
 }
@@ -4748,26 +5087,28 @@ return if the collection maximum is mutable with collection mutability config
         error::not_found(ETOKEN_STORE_NOT_PUBLISHED),
     );
-    let token_store = borrow_global_mut<TokenStore>(account_addr);
+    let token_store = &mut TokenStore[account_addr];
     if (std::features::module_event_migration_enabled()) {
-        event::emit(Withdraw { id, amount })
+        event::emit(TokenWithdraw { account: account_addr, id, amount })
+    } else {
+        event::emit_event<WithdrawEvent>(
+            &mut token_store.withdraw_events,
+            WithdrawEvent { id, amount }
+        );
     };
-    event::emit_event<WithdrawEvent>(
-        &mut token_store.withdraw_events,
-        WithdrawEvent { id, amount }
-    );
-    let tokens = &mut borrow_global_mut<TokenStore>(account_addr).tokens;
+
+    let tokens = &mut TokenStore[account_addr].tokens;
     assert!(
-        table::contains(tokens, id),
+        tokens.contains(id),
         error::not_found(ENO_TOKEN_IN_TOKEN_STORE),
     );
     // balance > amount and amount > 0 indirectly asserted that balance > 0.
-    let balance = &mut table::borrow_mut(tokens, id).amount;
+    let balance = &mut tokens.borrow_mut(id).amount;
     if (*balance > amount) {
-        *balance = *balance - amount;
+        *balance -= amount;
         Token { id, amount, token_properties: property_map::empty() }
     } else {
-        table::remove(tokens, id)
+        tokens.remove(id)
     }
 }
@@ -4798,12 +5139,12 @@ return if the collection maximum is mutable with collection mutability config
     values: vector<vector<u8>>,
     types: vector<String>,
 ) acquires TokenStore {
-    let tokens = &mut borrow_global_mut<TokenStore>(token_owner).tokens;
-    assert!(table::contains(tokens, token_id), error::not_found(ENO_TOKEN_IN_TOKEN_STORE));
+    let tokens = &mut TokenStore[token_owner].tokens;
+    assert!(tokens.contains(token_id), error::not_found(ENO_TOKEN_IN_TOKEN_STORE));

-    let value = &mut table::borrow_mut(tokens, token_id).token_properties;
+    let value = &mut tokens.borrow_mut(token_id).token_properties;
     assert_non_standard_reserved_property(&keys);
-    property_map::update_property_map(value, keys, values, types);
+    value.update_property_map(keys, values, types);
 }
@@ -4829,25 +5170,26 @@ Deposit the token balance into the recipients account and emit an event.
fun direct_deposit(account_addr: address, token: Token) acquires TokenStore {
     assert!(token.amount > 0, error::invalid_argument(ETOKEN_CANNOT_HAVE_ZERO_AMOUNT));
-    let token_store = borrow_global_mut<TokenStore>(account_addr);
+    let token_store = &mut TokenStore[account_addr];
 
     if (std::features::module_event_migration_enabled()) {
-        event::emit(Deposit { id: token.id, amount: token.amount });
+        event::emit(TokenDeposit { account: account_addr, id: token.id, amount: token.amount });
+    } else {
+        event::emit_event<DepositEvent>(
+            &mut token_store.deposit_events,
+            DepositEvent { id: token.id, amount: token.amount },
+        );
     };
-    event::emit_event<DepositEvent>(
-        &mut token_store.deposit_events,
-        DepositEvent { id: token.id, amount: token.amount },
-    );
 
     assert!(
         exists<TokenStore>(account_addr),
         error::not_found(ETOKEN_STORE_NOT_PUBLISHED),
     );
 
-    if (!table::contains(&token_store.tokens, token.id)) {
-        table::add(&mut token_store.tokens, token.id, token);
+    if (!token_store.tokens.contains(token.id)) {
+        token_store.tokens.add(token.id, token);
     } else {
-        let recipient_token = table::borrow_mut(&mut token_store.tokens, token.id);
+        let recipient_token = token_store.tokens.borrow_mut(token.id);
         merge(recipient_token, token);
     };
 }
@@ -4874,8 +5216,8 @@ Deposit the token balance into the recipients account and emit an event.
 
 
fun assert_collection_exists(creator_address: address, collection_name: String) acquires Collections {
     assert!(exists<Collections>(creator_address), error::not_found(ECOLLECTIONS_NOT_PUBLISHED));
-    let all_collection_data = &borrow_global<Collections>(creator_address).collection_data;
-    assert!(table::contains(all_collection_data, collection_name), error::not_found(ECOLLECTION_NOT_PUBLISHED));
+    let all_collection_data = &Collections[creator_address].collection_data;
+    assert!(all_collection_data.contains(collection_name), error::not_found(ECOLLECTION_NOT_PUBLISHED));
 }
 
@@ -4902,8 +5244,8 @@ Deposit the token balance into the recipients account and emit an event.
     let creator_addr = token_data_id.creator;
     assert!(signer::address_of(creator) == creator_addr, error::permission_denied(ENO_MUTATE_CAPABILITY));
     assert!(exists<Collections>(creator_addr), error::not_found(ECOLLECTIONS_NOT_PUBLISHED));
-    let all_token_data = &mut borrow_global_mut<Collections>(creator_addr).token_data;
-    assert!(table::contains(all_token_data, token_data_id), error::not_found(ETOKEN_DATA_NOT_PUBLISHED));
+    let all_token_data = &Collections[creator_addr].token_data;
+    assert!(all_token_data.contains(token_data_id), error::not_found(ETOKEN_DATA_NOT_PUBLISHED));
 }
@@ -4927,11 +5269,11 @@ Deposit the token balance into the recipients account and emit an event.
fun assert_non_standard_reserved_property(keys: &vector<String>) {
-    vector::for_each_ref(keys, |key| {
+    keys.for_each_ref(|key| {
         let key: &String = key;
-        let length = string::length(key);
+        let length = key.length();
         if (length >= 6) {
-            let prefix = string::sub_string(&*key, 0, 6);
+            let prefix = key.sub_string(0, 6);
             assert!(prefix != string::utf8(b"TOKEN_"), error::permission_denied(EPROPERTY_RESERVED_BY_STANDARD));
         };
     });
@@ -4997,7 +5339,7 @@ Deposit the token balance into the recipients account and emit an event.
 
 
 
pragma verify = true;
-pragma aborts_if_is_strict;
+pragma aborts_if_is_partial;
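
Switching the verification pragma from `aborts_if_is_strict` to `aborts_if_is_partial` relaxes the proof obligation: each listed `aborts_if` condition must still imply an abort, but the spec no longer has to enumerate every abort path, which is consistent with the account/GUID `aborts_if` clauses being dropped in the hunks that follow. A minimal sketch on a hypothetical function, with the pragma applied at function scope (it can also be set in a `spec module` block, as above):

```move
module 0x42::pragma_example {
    /// Hypothetical error code, only for illustration.
    const ELIMIT_EXCEEDED: u64 = 1;

    public fun bounded_add(a: u64, b: u64): u64 {
        assert!(a <= 100, ELIMIT_EXCEEDED);
        a + b
    }

    spec bounded_add {
        // Partial mode: only the listed condition must cause an abort; the
        // possible arithmetic overflow of `a + b` need not be enumerated.
        // Under strict mode the missing overflow case would be flagged.
        pragma aborts_if_is_partial;
        aborts_if a > 100;
    }
}
```
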
 
@@ -5090,11 +5432,11 @@ only creator of the tokendata can mint tokens
 aborts_if token_data_id.creator != signer::address_of(account);
 include CreateTokenDataIdAbortsIf{
     creator: token_data_address,
-    collection: collection,
-    name: name
+    collection,
+    name
 };
 include MintTokenAbortsIf {
-    token_data_id: token_data_id
+    token_data_id
 };
@@ -5116,7 +5458,7 @@ The signer is creator.
 let addr = signer::address_of(account);
 aborts_if addr != creator;
 include CreateTokenDataIdAbortsIf {
-    creator: creator,
+    creator,
     collection: collection_name,
     name: token_name
 };
@@ -5138,8 +5480,8 @@ The signer is creator.
pragma aborts_if_is_partial;
 include CreateTokenDataIdAbortsIf{
     creator: creators_address,
-    collection: collection,
-    name: name
+    collection,
+    name
 };
 
@@ -5159,12 +5501,6 @@ The signer is creator.
pragma aborts_if_is_partial;
 let addr = signer::address_of(account);
 let account_addr = global<account::Account>(addr);
-aborts_if !exists<TokenStore>(addr) && !exists<account::Account>(addr);
-aborts_if !exists<TokenStore>(addr) && account_addr.guid_creation_num + 4 >= account::MAX_GUID_CREATION_NUM;
-aborts_if !exists<TokenStore>(addr) && account_addr.guid_creation_num + 4 > MAX_U64;
-aborts_if !exists<token_event_store::TokenEventStoreV1>(addr) && account_addr.guid_creation_num + 9 > account::MAX_GUID_CREATION_NUM;
-aborts_if !exists<token_event_store::TokenEventStoreV1>(addr) && account_addr.guid_creation_num + 9 > MAX_U64;
-aborts_if !exists<token_event_store::TokenEventStoreV1>(addr) && !exists<account::Account>(addr);
 
@@ -5182,7 +5518,7 @@ The signer is creator.
pragma aborts_if_is_partial;
 include CreateTokenDataIdAbortsIf{
-    creator: creator,
+    creator,
     collection: collection_name,
     name: token_name
 };
@@ -5287,12 +5623,9 @@ The description of Collection is mutable.
 let collection_data = table::spec_get(global<Collections>(addr).collection_data, collection_name);
 include AssertCollectionExistsAbortsIf {
     creator_address: addr,
-    collection_name: collection_name
+    collection_name
 };
 aborts_if !collection_data.mutability_config.description;
-aborts_if !exists<token_event_store::TokenEventStoreV1>(addr) && !exists<account::Account>(addr);
-aborts_if !exists<token_event_store::TokenEventStoreV1>(addr) && account.guid_creation_num + 9 >= account::MAX_GUID_CREATION_NUM;
-aborts_if !exists<token_event_store::TokenEventStoreV1>(addr) && account.guid_creation_num + 9 > MAX_U64;
 
@@ -5315,12 +5648,9 @@ The uri of Collection is mutable.
 aborts_if len(uri.bytes) > MAX_URI_LENGTH;
 include AssertCollectionExistsAbortsIf {
     creator_address: addr,
-    collection_name: collection_name
+    collection_name
 };
 aborts_if !collection_data.mutability_config.uri;
-aborts_if !exists<token_event_store::TokenEventStoreV1>(addr) && !exists<account::Account>(addr);
-aborts_if !exists<token_event_store::TokenEventStoreV1>(addr) && account.guid_creation_num + 9 >= account::MAX_GUID_CREATION_NUM;
-aborts_if !exists<token_event_store::TokenEventStoreV1>(addr) && account.guid_creation_num + 9 > MAX_U64;
@@ -5344,14 +5674,11 @@ The maxium of Collection is mutable.
 let collection_data = table::spec_get(global<Collections>(addr).collection_data, collection_name);
 include AssertCollectionExistsAbortsIf {
     creator_address: addr,
-    collection_name: collection_name
+    collection_name
 };
 aborts_if collection_data.maximum == 0 || maximum == 0;
 aborts_if maximum < collection_data.supply;
 aborts_if !collection_data.mutability_config.maximum;
-aborts_if !exists<token_event_store::TokenEventStoreV1>(addr) && !exists<account::Account>(addr);
-aborts_if !exists<token_event_store::TokenEventStoreV1>(addr) && account.guid_creation_num + 9 >= account::MAX_GUID_CREATION_NUM;
-aborts_if !exists<token_event_store::TokenEventStoreV1>(addr) && account.guid_creation_num + 9 > MAX_U64;
@@ -5378,9 +5705,6 @@ The token maximum is mutable
 aborts_if token_data.maximum == 0 || maximum == 0;
 aborts_if maximum < token_data.supply;
 aborts_if !token_data.mutability_config.maximum;
-aborts_if !exists<token_event_store::TokenEventStoreV1>(addr) && !exists<account::Account>(addr);
-aborts_if !exists<token_event_store::TokenEventStoreV1>(addr) && account.guid_creation_num + 9 >= account::MAX_GUID_CREATION_NUM;
-aborts_if !exists<token_event_store::TokenEventStoreV1>(addr) && account.guid_creation_num + 9 > MAX_U64;
@@ -5406,9 +5730,6 @@ The token uri is mutable
 include AssertTokendataExistsAbortsIf;
 aborts_if len(uri.bytes) > MAX_URI_LENGTH;
 aborts_if !token_data.mutability_config.uri;
-aborts_if !exists<token_event_store::TokenEventStoreV1>(addr) && !exists<account::Account>(addr);
-aborts_if !exists<token_event_store::TokenEventStoreV1>(addr) && account.guid_creation_num + 9 >= account::MAX_GUID_CREATION_NUM;
-aborts_if !exists<token_event_store::TokenEventStoreV1>(addr) && account.guid_creation_num + 9 > MAX_U64;
@@ -5431,9 +5752,6 @@ The token royalty is mutable
 let all_token_data = global<Collections>(token_data_id.creator).token_data;
 let token_data = table::spec_get(all_token_data, token_data_id);
 aborts_if !token_data.mutability_config.royalty;
-aborts_if !exists<token_event_store::TokenEventStoreV1>(addr) && !exists<account::Account>(addr);
-aborts_if !exists<token_event_store::TokenEventStoreV1>(addr) && account.guid_creation_num + 9 >= account::MAX_GUID_CREATION_NUM;
-aborts_if !exists<token_event_store::TokenEventStoreV1>(addr) && account.guid_creation_num + 9 > MAX_U64;
@@ -5456,9 +5774,6 @@ The token description is mutable
 let all_token_data = global<Collections>(token_data_id.creator).token_data;
 let token_data = table::spec_get(all_token_data, token_data_id);
 aborts_if !token_data.mutability_config.description;
-aborts_if !exists<token_event_store::TokenEventStoreV1>(addr) && !exists<account::Account>(addr);
-aborts_if !exists<token_event_store::TokenEventStoreV1>(addr) && account.guid_creation_num + 9 >= account::MAX_GUID_CREATION_NUM;
-aborts_if !exists<token_event_store::TokenEventStoreV1>(addr) && account.guid_creation_num + 9 > MAX_U64;
@@ -5539,7 +5854,6 @@ The royalty_points_numerator should less than royalty_points_denominator.
     royalty_points_denominator: u64;
     payee_address: address;
     aborts_if royalty_points_numerator > royalty_points_denominator;
-    aborts_if !exists<account::Account>(payee_address);
 }
@@ -5631,9 +5945,6 @@ Make sure the account has sufficient tokens to withdraw.
     account: signer;
     let addr = signer::address_of(account);
     let account_addr = global<account::Account>(addr);
-    aborts_if !exists<TokenStore>(addr) && !exists<account::Account>(addr);
-    aborts_if !exists<TokenStore>(addr) && account_addr.guid_creation_num + 4 >= account::MAX_GUID_CREATION_NUM;
-    aborts_if !exists<TokenStore>(addr) && account_addr.guid_creation_num + 4 > MAX_U64;
 }
@@ -5798,11 +6109,6 @@ The collection_data should not exist before you create it.
     let collection = global<Collections>(addr);
     let b = !exists<Collections>(addr);
     let collection_data = global<Collections>(addr).collection_data;
-    aborts_if b && !exists<account::Account>(addr);
-    aborts_if len(name.bytes) > MAX_COLLECTION_NAME_LENGTH;
-    aborts_if len(uri.bytes) > MAX_URI_LENGTH;
-    aborts_if b && account.guid_creation_num + 3 >= account::MAX_GUID_CREATION_NUM;
-    aborts_if b && account.guid_creation_num + 3 > MAX_U64;
     include CreateCollectionMutabilityConfigAbortsIf;
 }
@@ -5840,7 +6146,7 @@ The length of name should less than MAX_NFT_NAME_LENGTH
aborts_if !exists<Collections>(creator);
 include CreateTokenDataIdAbortsIf {
-    creator: creator,
+    creator,
     collection: collection_name,
     name: token_name
 };
@@ -5875,8 +6181,8 @@ The length of name should less than MAX_NFT_NAME_LENGTH
 aborts_if !exists<Collections>(account_addr);
 include CreateTokenDataIdAbortsIf {
     creator: account_addr,
-    collection: collection,
-    name: name
+    collection,
+    name
 };
 aborts_if !table::spec_contains(collections.collection_data, collection);
 aborts_if table::spec_contains(collections.token_data, token_data_id);
@@ -6142,7 +6448,7 @@ The sum of supply and the amount of mint Token is less than maximum.
 let token_id = create_token_id(token_data_id, 0);
 include DirectDepositAbortsIf {
     account_addr: receiver,
-    token_id: token_id,
+    token_id,
     token_amount: amount,
 };
 
diff --git a/aptos-move/framework/aptos-token/doc/token_event_store.md b/aptos-move/framework/aptos-token/doc/token_event_store.md
index bc8bc9356b86d..2021435244957 100644
--- a/aptos-move/framework/aptos-token/doc/token_event_store.md
+++ b/aptos-move/framework/aptos-token/doc/token_event_store.md
@@ -11,7 +11,7 @@ This module provides utils to add and emit new token events that are not in toke
 - [Struct `CollectionUriMutateEvent`](#0x3_token_event_store_CollectionUriMutateEvent)
 - [Struct `CollectionUriMutate`](#0x3_token_event_store_CollectionUriMutate)
 - [Struct `CollectionMaxiumMutateEvent`](#0x3_token_event_store_CollectionMaxiumMutateEvent)
-- [Struct `CollectionMaxiumMutate`](#0x3_token_event_store_CollectionMaxiumMutate)
+- [Struct `CollectionMaximumMutate`](#0x3_token_event_store_CollectionMaximumMutate)
 - [Struct `OptInTransferEvent`](#0x3_token_event_store_OptInTransferEvent)
 - [Struct `OptInTransfer`](#0x3_token_event_store_OptInTransfer)
 - [Struct `UriMutationEvent`](#0x3_token_event_store_UriMutationEvent)
@@ -25,6 +25,7 @@ This module provides utils to add and emit new token events that are not in toke
 - [Struct `MaxiumMutateEvent`](#0x3_token_event_store_MaxiumMutateEvent)
 - [Struct `MaximumMutate`](#0x3_token_event_store_MaximumMutate)
 - [Resource `TokenEventStoreV1`](#0x3_token_event_store_TokenEventStoreV1)
+- [Struct `CollectionMaxiumMutate`](#0x3_token_event_store_CollectionMaxiumMutate)
 - [Function `initialize_token_event_store`](#0x3_token_event_store_initialize_token_event_store)
 - [Function `emit_collection_uri_mutate_event`](#0x3_token_event_store_emit_collection_uri_mutate_event)
 - [Function `emit_collection_description_mutate_event`](#0x3_token_event_store_emit_collection_description_mutate_event)
@@ -292,15 +293,15 @@ Event emitted when the collection maximum is mutated
 
-
+
 
-## Struct `CollectionMaxiumMutate`
+## Struct `CollectionMaximumMutate`
 
 Event emitted when the collection maximum is mutated
 
#[event]
-struct CollectionMaxiumMutate has drop, store
+struct CollectionMaximumMutate has drop, store
 
@@ -1066,6 +1067,53 @@ Event emitted when the token maximum is mutated
+
+## Struct `CollectionMaxiumMutate`
+
+#[event]
+#[deprecated]
+struct CollectionMaxiumMutate has drop, store
+
+Fields
+creator_addr: address
+collection_name: string::String
+old_maximum: u64
+new_maximum: u64
+
@@ -1121,7 +1169,7 @@ Emit the collection uri mutation event
 Implementation
-public(friend) fun emit_collection_uri_mutate_event(creator: &signer, collection: String, old_uri: String, new_uri: String) acquires TokenEventStoreV1 {
+friend fun emit_collection_uri_mutate_event(creator: &signer, collection: String, old_uri: String, new_uri: String) acquires TokenEventStoreV1 {
     let event = CollectionUriMutateEvent {
         creator_addr: signer::address_of(creator),
         collection_name: collection,
@@ -1129,7 +1177,7 @@ Emit the collection uri mutation event
         new_uri,
     };
     initialize_token_event_store(creator);
-    let token_event_store = borrow_global_mut<TokenEventStoreV1>(signer::address_of(creator));
+    let token_event_store = &mut TokenEventStoreV1[signer::address_of(creator)];
     if (std::features::module_event_migration_enabled()) {
         event::emit(
             CollectionUriMutate {
@@ -1139,11 +1187,12 @@ Emit the collection uri mutation event
                 new_uri,
             }
         );
+    } else {
+        event::emit_event<CollectionUriMutateEvent>(
+            &mut token_event_store.collection_uri_mutate_events,
+            event,
+        );
     };
-    event::emit_event<CollectionUriMutateEvent>(
-        &mut token_event_store.collection_uri_mutate_events,
-        event,
-    );
 }
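This and the following hunks in token_event_store.md apply the same two mechanical changes throughout: `borrow_global_mut<T>(addr)` becomes the index expression `&mut T[addr]`, and the legacy `event::emit_event` call moves into an `else` branch so that exactly one representation of each event is emitted, depending on whether module-event migration is enabled. A condensed sketch of the resulting shape, with hypothetical module, resource, and event names (only the feature check and the two emit calls are taken from the diff):

    module 0x3::example_events {                   // hypothetical module, for illustration only
        use aptos_framework::event;

        #[event]
        struct NameChange has drop, store { old_name: vector<u8>, new_name: vector<u8> }

        struct EventStore has key {
            name_change_events: event::EventHandle<NameChange>,
        }

        fun emit_name_change(addr: address, old_name: vector<u8>, new_name: vector<u8>) acquires EventStore {
            // index notation: equivalent to borrow_global_mut<EventStore>(addr)
            let store = &mut EventStore[addr];
            if (std::features::module_event_migration_enabled()) {
                // new path: module event, no handle required
                event::emit(NameChange { old_name, new_name });
            } else {
                // legacy path: handle-based event, now emitted only when migration is off
                event::emit_event(&mut store.name_change_events, NameChange { old_name, new_name });
            };
        }
    }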
 
@@ -1167,7 +1216,7 @@ Emit the collection description mutation event
 Implementation
-public(friend) fun emit_collection_description_mutate_event(creator: &signer, collection: String, old_description: String, new_description: String) acquires TokenEventStoreV1 {
+friend fun emit_collection_description_mutate_event(creator: &signer, collection: String, old_description: String, new_description: String) acquires TokenEventStoreV1 {
     let event = CollectionDescriptionMutateEvent {
         creator_addr: signer::address_of(creator),
         collection_name: collection,
@@ -1175,7 +1224,7 @@ Emit the collection description mutation event
         new_description,
     };
     initialize_token_event_store(creator);
-    let token_event_store = borrow_global_mut<TokenEventStoreV1>(signer::address_of(creator));
+    let token_event_store = &mut TokenEventStoreV1[signer::address_of(creator)];
     if (std::features::module_event_migration_enabled()) {
         event::emit(
             CollectionDescriptionMutate {
@@ -1185,11 +1234,12 @@ Emit the collection description mutation event
                 new_description,
             }
         );
-    };
-    event::emit_event<CollectionDescriptionMutateEvent>(
-        &mut token_event_store.collection_description_mutate_events,
-        event,
-    );
+    } else {
+        event::emit_event<CollectionDescriptionMutateEvent>(
+            &mut token_event_store.collection_description_mutate_events,
+            event,
+        );
+    }
 }
 
@@ -1213,7 +1263,7 @@ Emit the collection maximum mutation event
 Implementation
-public(friend) fun emit_collection_maximum_mutate_event(creator: &signer, collection: String, old_maximum: u64, new_maximum: u64) acquires TokenEventStoreV1 {
+friend fun emit_collection_maximum_mutate_event(creator: &signer, collection: String, old_maximum: u64, new_maximum: u64) acquires TokenEventStoreV1 {
     let event = CollectionMaxiumMutateEvent {
         creator_addr: signer::address_of(creator),
         collection_name: collection,
@@ -1221,21 +1271,22 @@ Emit the collection maximum mutation event
         new_maximum,
     };
     initialize_token_event_store(creator);
-    let token_event_store = borrow_global_mut<TokenEventStoreV1>(signer::address_of(creator));
+    let token_event_store = &mut TokenEventStoreV1[signer::address_of(creator)];
     if (std::features::module_event_migration_enabled()) {
         event::emit(
-            CollectionMaxiumMutate {
+            CollectionMaximumMutate {
                 creator_addr: signer::address_of(creator),
                 collection_name: collection,
                 old_maximum,
                 new_maximum,
             }
         );
+    } else {
+        event::emit_event<CollectionMaxiumMutateEvent>(
+            &mut token_event_store.collection_maximum_mutate_events,
+            event,
+        );
     };
-    event::emit_event<CollectionMaxiumMutateEvent>(
-        &mut token_event_store.collection_maximum_mutate_events,
-        event,
-    );
 }
 
@@ -1259,23 +1310,24 @@ Emit the direct opt-in event
 Implementation
-public(friend) fun emit_token_opt_in_event(account: &signer, opt_in: bool) acquires TokenEventStoreV1 {
+friend fun emit_token_opt_in_event(account: &signer, opt_in: bool) acquires TokenEventStoreV1 {
     let opt_in_event = OptInTransferEvent {
       opt_in,
     };
     initialize_token_event_store(account);
-    let token_event_store = borrow_global_mut<TokenEventStoreV1>(signer::address_of(account));
+    let token_event_store = &mut TokenEventStoreV1[signer::address_of(account)];
     if (std::features::module_event_migration_enabled()) {
         event::emit(
             OptInTransfer {
                 account_address: signer::address_of(account),
                 opt_in,
             });
-    };
-    event::emit_event<OptInTransferEvent>(
-        &mut token_event_store.opt_in_events,
-        opt_in_event,
-    );
+    } else {
+        event::emit_event<OptInTransferEvent>(
+            &mut token_event_store.opt_in_events,
+            opt_in_event,
+        );
+    }
 }
 
@@ -1299,7 +1351,7 @@ Emit URI mutation event
 Implementation
-public(friend) fun emit_token_uri_mutate_event(
+friend fun emit_token_uri_mutate_event(
     creator: &signer,
     collection: String,
     token: String,
@@ -1317,7 +1369,7 @@ Emit URI mutation event
     };
 
     initialize_token_event_store(creator);
-    let token_event_store = borrow_global_mut<TokenEventStoreV1>(creator_addr);
+    let token_event_store = &mut TokenEventStoreV1[creator_addr];
     if (std::features::module_event_migration_enabled()) {
         event::emit(
             UriMutation {
@@ -1327,11 +1379,12 @@ Emit URI mutation event
                 old_uri,
                 new_uri,
             });
+    } else {
+        event::emit_event<UriMutationEvent>(
+            &mut token_event_store.uri_mutate_events,
+            event,
+        );
     };
-    event::emit_event<UriMutationEvent>(
-        &mut token_event_store.uri_mutate_events,
-        event,
-    );
 }
 
@@ -1355,7 +1408,7 @@ Emit tokendata property map mutation event
 Implementation
-public(friend) fun emit_default_property_mutate_event(
+friend fun emit_default_property_mutate_event(
     creator: &signer,
     collection: String,
     token: String,
@@ -1375,7 +1428,7 @@ Emit tokendata property map mutation event
     };
 
     initialize_token_event_store(creator);
-    let token_event_store = borrow_global_mut<TokenEventStoreV1>(creator_addr);
+    let token_event_store = &mut TokenEventStoreV1[creator_addr];
     if (std::features::module_event_migration_enabled()) {
         event::emit(
             DefaultPropertyMutate {
@@ -1386,11 +1439,12 @@ Emit tokendata property map mutation event
                 old_values,
                 new_values,
             });
+    } else {
+        event::emit_event<DefaultPropertyMutateEvent>(
+            &mut token_event_store.default_property_mutate_events,
+            event,
+        );
     };
-    event::emit_event<DefaultPropertyMutateEvent>(
-        &mut token_event_store.default_property_mutate_events,
-        event,
-    );
 }
 
@@ -1414,7 +1468,7 @@ Emit description mutation event
 Implementation
-public(friend) fun emit_token_descrition_mutate_event(
+friend fun emit_token_descrition_mutate_event(
     creator: &signer,
     collection: String,
     token: String,
@@ -1432,7 +1486,7 @@ Emit description mutation event
     };
 
     initialize_token_event_store(creator);
-    let token_event_store = borrow_global_mut<TokenEventStoreV1>(creator_addr);
+    let token_event_store = &mut TokenEventStoreV1[creator_addr];
     if (std::features::module_event_migration_enabled()) {
         event::emit(
             DescriptionMutate {
@@ -1442,11 +1496,12 @@ Emit description mutation event
                 old_description,
                 new_description,
             });
+    } else {
+        event::emit_event<DescriptionMutateEvent>(
+            &mut token_event_store.description_mutate_events,
+            event,
+        );
     };
-    event::emit_event<DescriptionMutateEvent>(
-        &mut token_event_store.description_mutate_events,
-        event,
-    );
 }
 
@@ -1470,7 +1525,7 @@ Emit royalty mutation event
 Implementation
-public(friend) fun emit_token_royalty_mutate_event(
+friend fun emit_token_royalty_mutate_event(
     creator: &signer,
     collection: String,
     token: String,
@@ -1495,7 +1550,7 @@ Emit royalty mutation event
     };
 
     initialize_token_event_store(creator);
-    let token_event_store = borrow_global_mut<TokenEventStoreV1>(creator_addr);
+    let token_event_store = &mut TokenEventStoreV1[creator_addr];
     if (std::features::module_event_migration_enabled()) {
         event::emit(
             RoyaltyMutate {
@@ -1509,11 +1564,12 @@ Emit royalty mutation event
                 new_royalty_denominator,
                 new_royalty_payee_addr,
             });
+    } else {
+        event::emit_event<RoyaltyMutateEvent>(
+            &mut token_event_store.royalty_mutate_events,
+            event,
+        );
     };
-    event::emit_event<RoyaltyMutateEvent>(
-        &mut token_event_store.royalty_mutate_events,
-        event,
-    );
 }
 
@@ -1537,7 +1593,7 @@ Emit maximum mutation event
 Implementation
-public(friend) fun emit_token_maximum_mutate_event(
+friend fun emit_token_maximum_mutate_event(
     creator: &signer,
     collection: String,
     token: String,
@@ -1555,7 +1611,7 @@ Emit maximum mutation event
     };
 
     initialize_token_event_store(creator);
-    let token_event_store =  borrow_global_mut<TokenEventStoreV1>(creator_addr);
+    let token_event_store =  &mut TokenEventStoreV1[creator_addr];
     if (std::features::module_event_migration_enabled()) {
         event::emit(
             MaximumMutate {
@@ -1565,11 +1621,12 @@ Emit maximum mutation event
                 old_maximum,
                 new_maximum,
             });
+    } else {
+        event::emit_event<MaxiumMutateEvent>(
+            &mut token_event_store.maximum_mutate_events,
+            event,
+        );
     };
-    event::emit_event<MaxiumMutateEvent>(
-        &mut token_event_store.maximum_mutate_events,
-        event,
-    );
 }
 
@@ -1583,7 +1640,7 @@ Emit maximum mutation event
-pragma verify = true;
+pragma verify = false;
 pragma aborts_if_is_strict;
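For context on the `pragma verify = true` → `pragma verify = false` flips in this spec section: `verify` set in a module spec block is the default for every function spec in that module and can be overridden per function, while `aborts_if_is_strict` keeps requiring that abort behavior be fully specified wherever verification does run. A minimal sketch of the two scopes, against a hypothetical spec target that is not part of this change:

    spec aptos_token::example_module {             // hypothetical spec target, for illustration only
        spec module {
            pragma verify = false;                 // module-wide default: skip the prover
            pragma aborts_if_is_strict;
        }

        spec emit_example_event {
            pragma verify = true;                  // opt a single function back in
            aborts_if false;
        }
    }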
 
@@ -1600,8 +1657,7 @@ Emit maximum mutation event
-pragma verify = true;
-let addr = signer::address_of(acct);
+let addr = signer::address_of(acct);
 include InitializeTokenEventStoreAbortsIf {creator : acct};
 
diff --git a/aptos-move/framework/aptos-token/doc/token_transfers.md b/aptos-move/framework/aptos-token/doc/token_transfers.md
index 960c9e0ce2094..9d48024fcd9c5 100644
--- a/aptos-move/framework/aptos-token/doc/token_transfers.md
+++ b/aptos-move/framework/aptos-token/doc/token_transfers.md
@@ -9,10 +9,13 @@ This module provides the foundation for transferring of Tokens
 - [Resource `PendingClaims`](#0x3_token_transfers_PendingClaims)
 - [Struct `TokenOfferId`](#0x3_token_transfers_TokenOfferId)
 - [Struct `TokenOffer`](#0x3_token_transfers_TokenOffer)
-- [Struct `TokenOfferEvent`](#0x3_token_transfers_TokenOfferEvent)
+- [Struct `Offer`](#0x3_token_transfers_Offer)
 - [Struct `TokenCancelOfferEvent`](#0x3_token_transfers_TokenCancelOfferEvent)
-- [Struct `TokenCancelOffer`](#0x3_token_transfers_TokenCancelOffer)
+- [Struct `CancelOffer`](#0x3_token_transfers_CancelOffer)
 - [Struct `TokenClaimEvent`](#0x3_token_transfers_TokenClaimEvent)
+- [Struct `Claim`](#0x3_token_transfers_Claim)
+- [Struct `TokenOfferEvent`](#0x3_token_transfers_TokenOfferEvent)
+- [Struct `TokenCancelOffer`](#0x3_token_transfers_TokenCancelOffer)
 - [Struct `TokenClaim`](#0x3_token_transfers_TokenClaim)
 - [Constants](#@Constants_0)
 - [Function `initialize_token_transfers`](#0x3_token_transfers_initialize_token_transfers)
@@ -165,14 +168,14 @@ This module provides the foundation for transferring of Tokens
-## Struct `TokenOfferEvent`
+## Struct `Offer`
#[event]
-struct TokenOfferEvent has drop, store
+struct Offer has drop, store
 
@@ -183,6 +186,12 @@ This module provides the foundation for transferring of Tokens
+account: address
to_address: address
@@ -245,14 +254,14 @@ This module provides the foundation for transferring of Tokens
-## Struct `TokenCancelOffer`
+## Struct `CancelOffer`
#[event]
-struct TokenCancelOffer has drop, store
+struct CancelOffer has drop, store
 
@@ -263,6 +272,12 @@ This module provides the foundation for transferring of Tokens
+account: address
to_address: address
@@ -297,6 +312,134 @@ This module provides the foundation for transferring of Tokens
+
+Fields
+to_address: address
+token_id: token::TokenId
+amount: u64
+
+## Struct `Claim`
+
+#[event]
+struct Claim has drop, store
+
+Fields
+account: address
+to_address: address
+token_id: token::TokenId
+amount: u64
+
+## Struct `TokenOfferEvent`
+
+#[event]
+#[deprecated]
+struct TokenOfferEvent has drop, store
+
+Fields
+to_address: address
+token_id: token::TokenId
+amount: u64
+
+## Struct `TokenCancelOffer`
+
+#[event]
+#[deprecated]
+struct TokenCancelOffer has drop, store
+
+Fields
@@ -332,6 +475,7 @@ This module provides the foundation for transferring of Tokens
#[event]
+#[deprecated]
 struct TokenClaim has drop, store
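Taken together with the token_event_store.md changes earlier, the convention for each event pair is: the module event gets a short name plus an explicit `account` field, while the old struct stays in the module with a `#[deprecated]` annotation, presumably so existing references to the type (and the legacy handle-based path in the hunks below) remain valid. A condensed sketch using the field types listed in the doc blocks above:

    #[event]
    struct Offer has drop, store {
        account: address,                          // new field carried only by the module event
        to_address: address,
        token_id: token::TokenId,
        amount: u64,
    }

    #[event]
    #[deprecated]                                  // legacy type kept; still emitted on the handle-based path
    struct TokenOfferEvent has drop, store {
        to_address: address,
        token_id: token::TokenId,
        amount: u64,
    }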
 
@@ -499,33 +643,35 @@ Token offer doesn't exist
     };
     let pending_claims =
-        &mut borrow_global_mut<PendingClaims>(sender_addr).pending_claims;
+        &mut PendingClaims[sender_addr].pending_claims;
     let token_offer_id = create_token_offer_id(receiver, token_id);
     let token = token::withdraw_token(sender, token_id, amount);
-    if (!table::contains(pending_claims, token_offer_id)) {
-        table::add(pending_claims, token_offer_id, token);
+    if (!pending_claims.contains(token_offer_id)) {
+        pending_claims.add(token_offer_id, token);
     } else {
-        let dst_token = table::borrow_mut(pending_claims, token_offer_id);
+        let dst_token = pending_claims.borrow_mut(token_offer_id);
         token::merge(dst_token, token);
     };
     if (std::features::module_event_migration_enabled()) {
         event::emit(
-            TokenOffer {
+            Offer {
+                account: sender_addr,
                 to_address: receiver,
                 token_id,
                 amount,
             }
         )
-    };
-    event::emit_event<TokenOfferEvent>(
-        &mut borrow_global_mut<PendingClaims>(sender_addr).offer_events,
-        TokenOfferEvent {
-            to_address: receiver,
-            token_id,
-            amount,
-        },
-    );
+    } else {
+        event::emit_event<TokenOfferEvent>(
+            &mut PendingClaims[sender_addr].offer_events,
+            TokenOfferEvent {
+                to_address: receiver,
+                token_id,
+                amount,
+            },
+        );
+    }
 }
@@ -587,30 +733,32 @@ Token offer doesn't exist
 ) acquires PendingClaims {
     assert!(exists<PendingClaims>(sender), ETOKEN_OFFER_NOT_EXIST);
     let pending_claims =
-        &mut borrow_global_mut<PendingClaims>(sender).pending_claims;
+        &mut PendingClaims[sender].pending_claims;
     let token_offer_id = create_token_offer_id(signer::address_of(receiver), token_id);
-    assert!(table::contains(pending_claims, token_offer_id), error::not_found(ETOKEN_OFFER_NOT_EXIST));
-    let tokens = table::remove(pending_claims, token_offer_id);
+    assert!(pending_claims.contains(token_offer_id), error::not_found(ETOKEN_OFFER_NOT_EXIST));
+    let tokens = pending_claims.remove(token_offer_id);
     let amount = token::get_token_amount(&tokens);
     token::deposit_token(receiver, tokens);
     if (std::features::module_event_migration_enabled()) {
         event::emit(
-            TokenClaim {
+            Claim {
+                account: sender,
                 to_address: signer::address_of(receiver),
                 token_id,
                 amount,
             }
         )
+    } else {
+        event::emit_event<TokenClaimEvent>(
+            &mut PendingClaims[sender].claim_events,
+            TokenClaimEvent {
+                to_address: signer::address_of(receiver),
+                token_id,
+                amount,
+            },
+        );
     };
-    event::emit_event<TokenClaimEvent>(
-        &mut borrow_global_mut<PendingClaims>(sender).claim_events,
-        TokenClaimEvent {
-            to_address: signer::address_of(receiver),
-            token_id,
-            amount,
-        },
-    );
 }
@@ -674,28 +822,30 @@ Token offer doesn't exist
     let token_offer_id = create_token_offer_id(receiver, token_id);
     assert!(exists<PendingClaims>(sender_addr), ETOKEN_OFFER_NOT_EXIST);
     let pending_claims =
-        &mut borrow_global_mut<PendingClaims>(sender_addr).pending_claims;
-    let token = table::remove(pending_claims, token_offer_id);
+        &mut PendingClaims[sender_addr].pending_claims;
+    let token = pending_claims.remove(token_offer_id);
     let amount = token::get_token_amount(&token);
     token::deposit_token(sender, token);
     if (std::features::module_event_migration_enabled()) {
         event::emit(
-            TokenCancelOffer {
+            CancelOffer {
+                account: sender_addr,
                 to_address: receiver,
                 token_id,
                 amount,
             },
         )
-    };
-    event::emit_event<TokenCancelOfferEvent>(
-        &mut borrow_global_mut<PendingClaims>(sender_addr).cancel_offer_events,
-        TokenCancelOfferEvent {
-            to_address: receiver,
-            token_id,
-            amount,
-        },
-    );
+    } else {
+        event::emit_event<TokenCancelOfferEvent>(
+            &mut PendingClaims[sender_addr].cancel_offer_events,
+            TokenCancelOfferEvent {
+                to_address: receiver,
+                token_id,
+                amount,
+            },
+        );
+    }
 }
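Besides the event rework, these three hunks also move the `pending_claims` table from free-function `table::*` calls to receiver-style calls on the value itself. A minimal sketch of the equivalence, using a hypothetical function over a `Table<u64, u64>` (not part of this change):

    module 0x3::example_table_calls {              // hypothetical module, for illustration only
        use aptos_std::table::Table;

        fun upsert(pending: &mut Table<u64, u64>, key: u64, value: u64) {
            // receiver syntax resolves to the same functions as table::contains(pending, key),
            // table::add(pending, key, value) and table::borrow_mut(pending, key)
            if (!pending.contains(key)) {
                pending.add(key, value);
            } else {
                *pending.borrow_mut(key) = value;
            };
        }
    }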
@@ -709,11 +859,35 @@ Token offer doesn't exist
-pragma verify = true;
+pragma verify = false;
 pragma aborts_if_is_strict;
 
+Get the amount from sender token
+
+fun spce_get(
+   account_addr: address,
+   id: TokenId,
+   amount: u64
+): u64 {
+   use aptos_token::token::{TokenStore};
+   use aptos_std::table::{Self};
+   let tokens = global<TokenStore>(account_addr).tokens;
+   let balance = table::spec_get(tokens, id).amount;
+   if (balance > amount) {
+       amount
+   } else {
+       table::spec_get(tokens, id).amount
+   }
+}
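The added `spce_get` helper caps the requested amount at the recorded token balance for `account_addr`, i.e. it is `min(balance, amount)`: with a balance of 5 and an amount of 3 it yields 3, with a balance of 2 and an amount of 3 it yields 2. An equivalent formulation over plain integers (helper name hypothetical):

    spec fun spec_capped_amount(balance: u64, amount: u64): u64 {
        // same branch structure as spce_get above
        if (balance > amount) { amount } else { balance }
    }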
+
@@ -810,30 +984,6 @@ Abort according to the code
-Get the amount from sender token
-
-fun spce_get(
-   account_addr: address,
-   id: TokenId,
-   amount: u64
-): u64 {
-   use aptos_token::token::{TokenStore};
-   use aptos_std::table::{Self};
-   let tokens = global<TokenStore>(account_addr).tokens;
-   let balance = table::spec_get(tokens, id).amount;
-   if (balance > amount) {
-       amount
-   } else {
-       table::spec_get(tokens, id).amount
-   }
-}
-
- - diff --git a/aptos-move/framework/aptos-token/sources/property_map.move b/aptos-move/framework/aptos-token/sources/property_map.move index 45546cc402644..087119590c5a0 100644 --- a/aptos-move/framework/aptos-token/sources/property_map.move +++ b/aptos-move/framework/aptos-token/sources/property_map.move @@ -4,7 +4,6 @@ /// It also supports deserializing property value to it original type. module aptos_token::property_map { use std::bcs; - use std::vector; use std::error; use std::string::{Self, String}; use aptos_std::from_bcs; @@ -62,23 +61,20 @@ module aptos_token::property_map { values: vector>, types: vector ): PropertyMap { - let length = vector::length(&keys); + let length = keys.length(); assert!(length <= MAX_PROPERTY_MAP_SIZE, error::invalid_argument(EPROPERTY_NUMBER_EXCEED_LIMIT)); - assert!(length == vector::length(&values), error::invalid_argument(EKEY_COUNT_NOT_MATCH_VALUE_COUNT)); - assert!(length == vector::length(&types), error::invalid_argument(EKEY_COUNT_NOT_MATCH_TYPE_COUNT)); + assert!(length == values.length(), error::invalid_argument(EKEY_COUNT_NOT_MATCH_VALUE_COUNT)); + assert!(length == types.length(), error::invalid_argument(EKEY_COUNT_NOT_MATCH_TYPE_COUNT)); let properties = empty(); - let i = 0; - while (i < length) { - let key = *vector::borrow(&keys, i); - assert!(string::length(&key) <= MAX_PROPERTY_NAME_LENGTH, error::invalid_argument(EPROPERTY_MAP_NAME_TOO_LONG)); - simple_map::add( - &mut properties.map, + for (i in 0..length) { + let key = keys[i]; + assert!(key.length() <= MAX_PROPERTY_NAME_LENGTH, error::invalid_argument(EPROPERTY_MAP_NAME_TOO_LONG)); + properties.map.add( key, - PropertyValue { value: *vector::borrow(&values, i), type: *vector::borrow(&types, i) } + PropertyValue { value: values[i], type: types[i] } ); - i = i + 1; }; properties } @@ -88,19 +84,17 @@ module aptos_token::property_map { keys: vector, values: vector ): PropertyMap { - let length = vector::length(&keys); + let length = keys.length(); assert!(length <= MAX_PROPERTY_MAP_SIZE, error::invalid_argument(EPROPERTY_NUMBER_EXCEED_LIMIT)); - assert!(length == vector::length(&values), error::invalid_argument(EKEY_COUNT_NOT_MATCH_VALUE_COUNT)); + assert!(length == values.length(), error::invalid_argument(EKEY_COUNT_NOT_MATCH_VALUE_COUNT)); let properties = empty(); - let i = 0; - while (i < length) { - let key = *vector::borrow(&keys, i); - let val = *vector::borrow(&values, i); - assert!(string::length(&key) <= MAX_PROPERTY_NAME_LENGTH, error::invalid_argument(EPROPERTY_MAP_NAME_TOO_LONG)); - add(&mut properties, key, val); - i = i + 1; + for (i in 0..length) { + let key = keys[i]; + let val = values[i]; + assert!(key.length() <= MAX_PROPERTY_NAME_LENGTH, error::invalid_argument(EPROPERTY_MAP_NAME_TOO_LONG)); + properties.add(key, val); }; properties } @@ -111,136 +105,132 @@ module aptos_token::property_map { } } - public fun contains_key(map: &PropertyMap, key: &String): bool { - simple_map::contains_key(&map.map, key) + public fun contains_key(self: &PropertyMap, key: &String): bool { + self.map.contains_key(key) } - public fun add(map: &mut PropertyMap, key: String, value: PropertyValue) { - assert!(string::length(&key) <= MAX_PROPERTY_NAME_LENGTH, error::invalid_argument(EPROPERTY_MAP_NAME_TOO_LONG)); - assert!(simple_map::length(&map.map) < MAX_PROPERTY_MAP_SIZE, error::invalid_state(EPROPERTY_NUMBER_EXCEED_LIMIT)); - simple_map::add(&mut map.map, key, value); + public fun add(self: &mut PropertyMap, key: String, value: PropertyValue) { + assert!(key.length() <= 
MAX_PROPERTY_NAME_LENGTH, error::invalid_argument(EPROPERTY_MAP_NAME_TOO_LONG)); + assert!(self.map.length() < MAX_PROPERTY_MAP_SIZE, error::invalid_state(EPROPERTY_NUMBER_EXCEED_LIMIT)); + self.map.add(key, value); } - public fun length(map: &PropertyMap): u64 { - simple_map::length(&map.map) + public fun length(self: &PropertyMap): u64 { + self.map.length() } - public fun borrow(map: &PropertyMap, key: &String): &PropertyValue { - let found = contains_key(map, key); + public fun borrow(self: &PropertyMap, key: &String): &PropertyValue { + let found = self.contains_key(key); assert!(found, EPROPERTY_NOT_EXIST); - simple_map::borrow(&map.map, key) + self.map.borrow(key) } /// Return all the keys in the property map in the order they are added. - public fun keys(map: &PropertyMap): vector { - simple_map::keys(&map.map) + public fun keys(self: &PropertyMap): vector { + self.map.keys() } /// Return the types of all properties in the property map in the order they are added. - public fun types(map: &PropertyMap): vector { - vector::map_ref(&simple_map::values(&map.map), |v| { - let v: &PropertyValue = v; + public fun types(self: &PropertyMap): vector { + self.map.values().map_ref(|v| { v.type }) } /// Return the values of all properties in the property map in the order they are added. - public fun values(map: &PropertyMap): vector> { - vector::map_ref(&simple_map::values(&map.map), |v| { - let v: &PropertyValue = v; + public fun values(self: &PropertyMap): vector> { + self.map.values().map_ref(|v| { v.value }) } - public fun read_string(map: &PropertyMap, key: &String): String { - let prop = borrow(map, key); + public fun read_string(self: &PropertyMap, key: &String): String { + let prop = self.borrow(key); assert!(prop.type == string::utf8(b"0x1::string::String"), error::invalid_state(ETYPE_NOT_MATCH)); from_bcs::to_string(prop.value) } - public fun read_u8(map: &PropertyMap, key: &String): u8 { - let prop = borrow(map, key); + public fun read_u8(self: &PropertyMap, key: &String): u8 { + let prop = self.borrow(key); assert!(prop.type == string::utf8(b"u8"), error::invalid_state(ETYPE_NOT_MATCH)); from_bcs::to_u8(prop.value) } - public fun read_u64(map: &PropertyMap, key: &String): u64 { - let prop = borrow(map, key); + public fun read_u64(self: &PropertyMap, key: &String): u64 { + let prop = self.borrow(key); assert!(prop.type == string::utf8(b"u64"), error::invalid_state(ETYPE_NOT_MATCH)); from_bcs::to_u64(prop.value) } - public fun read_address(map: &PropertyMap, key: &String): address { - let prop = borrow(map, key); + public fun read_address(self: &PropertyMap, key: &String): address { + let prop = self.borrow(key); assert!(prop.type == string::utf8(b"address"), error::invalid_state(ETYPE_NOT_MATCH)); from_bcs::to_address(prop.value) } - public fun read_u128(map: &PropertyMap, key: &String): u128 { - let prop = borrow(map, key); + public fun read_u128(self: &PropertyMap, key: &String): u128 { + let prop = self.borrow(key); assert!(prop.type == string::utf8(b"u128"), error::invalid_state(ETYPE_NOT_MATCH)); from_bcs::to_u128(prop.value) } - public fun read_bool(map: &PropertyMap, key: &String): bool { - let prop = borrow(map, key); + public fun read_bool(self: &PropertyMap, key: &String): bool { + let prop = self.borrow(key); assert!(prop.type == string::utf8(b"bool"), error::invalid_state(ETYPE_NOT_MATCH)); from_bcs::to_bool(prop.value) } - public fun borrow_value(property: &PropertyValue): vector { - property.value + public fun borrow_value(self: &PropertyValue): vector { + self.value } - 
public fun borrow_type(property: &PropertyValue): String { - property.type + public fun borrow_type(self: &PropertyValue): String { + self.type } public fun remove( - map: &mut PropertyMap, + self: &mut PropertyMap, key: &String ): (String, PropertyValue) { - let found = contains_key(map, key); + let found = self.contains_key(key); assert!(found, error::not_found(EPROPERTY_NOT_EXIST)); - simple_map::remove(&mut map.map, key) + self.map.remove(key) } /// Update the property in the existing property map /// Allow updating existing keys' value and add new key-value pairs public fun update_property_map( - map: &mut PropertyMap, + self: &mut PropertyMap, keys: vector, values: vector>, types: vector, ) { - let key_len = vector::length(&keys); - let val_len = vector::length(&values); - let typ_len = vector::length(&types); + let key_len = keys.length(); + let val_len = values.length(); + let typ_len = types.length(); assert!(key_len == val_len, error::invalid_state(EKEY_COUNT_NOT_MATCH_VALUE_COUNT)); assert!(key_len == typ_len, error::invalid_state(EKEY_COUNT_NOT_MATCH_TYPE_COUNT)); - let i = 0; - while (i < key_len) { - let key = vector::borrow(&keys, i); + for (i in 0..key_len) { + let key = &keys[i]; let prop_val = PropertyValue { - value: *vector::borrow(&values, i), - type: *vector::borrow(&types, i), + value: values[i], + type: types[i], }; - if (contains_key(map, key)) { - update_property_value(map, key, prop_val); + if (self.contains_key(key)) { + self.update_property_value(key, prop_val); } else { - add(map, *key, prop_val); + self.add(*key, prop_val); }; - i = i + 1; } } public fun update_property_value( - map: &mut PropertyMap, + self: &mut PropertyMap, key: &String, value: PropertyValue ) { - let property_val = simple_map::borrow_mut(&mut map.map, key); + let property_val = self.map.borrow_mut(key); *property_val = value; } @@ -297,39 +287,37 @@ module aptos_token::property_map { #[test] fun test_add_property(): PropertyMap { let properties = create_property_list(); - add( - &mut properties, utf8(b"level"), - PropertyValue { - value: b"1", - type: utf8(b"integer") - }); + properties.add(utf8(b"level"), PropertyValue { + value: b"1", + type: utf8(b"integer") + }); assert!( - borrow(&properties, &utf8(b"level")).value == b"1", + properties.borrow(&utf8(b"level")).value == b"1", EPROPERTY_NOT_EXIST); properties } #[test] fun test_get_property_keys() { - assert!(keys(&create_property_list()) == test_keys(), 0); + assert!(create_property_list().keys() == test_keys(), 0); } #[test] fun test_get_property_types() { - assert!(types(&create_property_list()) == test_types(), 0); + assert!(create_property_list().types() == test_types(), 0); } #[test] fun test_get_property_values() { - assert!(values(&create_property_list()) == test_values(), 0); + assert!(create_property_list().values() == test_values(), 0); } #[test] fun test_update_property(): PropertyMap { let properties = create_property_list(); - update_property_value(&mut properties, &utf8(b"attack"), PropertyValue { value: b"7", type: utf8(b"integer") }); + properties.update_property_value(&utf8(b"attack"), PropertyValue { value: b"7", type: utf8(b"integer") }); assert!( - borrow(&properties, &utf8(b"attack")).value == b"7", + properties.borrow(&utf8(b"attack")).value == b"7", 1 ); properties @@ -338,9 +326,9 @@ module aptos_token::property_map { #[test] fun test_remove_property(): PropertyMap { let properties = create_property_list(); - assert!(length(&mut properties) == 3, 1); - let (_, _) = remove(&mut properties, &utf8(b"attack")); - 
assert!(length(&properties) == 2, 1); + assert!(properties.length() == 3, 1); + let (_, _) = properties.remove(&utf8(b"attack")); + assert!(properties.length() == 2, 1); properties } @@ -358,8 +346,8 @@ module aptos_token::property_map { let values = vector>[ bcs::to_bytes(&10), bcs::to_bytes(&false) ]; let types = vector[ utf8(b"u8"), utf8(b"bool")]; let plist = new(keys, values, types); - assert!(!read_bool(&plist, &utf8(b"mutable")), 1); - assert!(read_u8(&plist, &utf8(b"attack")) == 10, 1); + assert!(!plist.read_bool(&utf8(b"mutable")), 1); + assert!(plist.read_u8(&utf8(b"attack")) == 10, 1); } #[test] @@ -367,8 +355,8 @@ module aptos_token::property_map { let data: address = @0xcafe; let pv = create_property_value(&data); let pm = create_property_list(); - add(&mut pm, string::utf8(b"addr"), pv); - assert!(read_address(&pm, &string::utf8(b"addr")) == data, 1) + pm.add(string::utf8(b"addr"), pv); + assert!(pm.read_address(&string::utf8(b"addr")) == data, 1) } #[test] @@ -378,6 +366,6 @@ module aptos_token::property_map { let pvs = vector[create_property_value(&data1), create_property_value(&data2)]; let keys = vector[string::utf8(b"addr"), string::utf8(b"flag")]; let pm = new_with_key_and_property_value(keys, pvs); - assert!(length(&pm) == 2, 1); + assert!(pm.length() == 2, 1); } } diff --git a/aptos-move/framework/aptos-token/sources/property_map.spec.move b/aptos-move/framework/aptos-token/sources/property_map.spec.move index b97e90b2c1bae..6ffd2e221b47b 100644 --- a/aptos-move/framework/aptos-token/sources/property_map.spec.move +++ b/aptos-move/framework/aptos-token/sources/property_map.spec.move @@ -4,7 +4,7 @@ spec aptos_token::property_map { pragma aborts_if_is_strict; let MAX_PROPERTY_MAP_SIZE = 1000; - let MAX_PROPERTY_NAME_LENGTH = 128; + let MAX_PROPERTY_NAME_LENGTH = 128; } spec new ( @@ -17,8 +17,8 @@ spec aptos_token::property_map { let length = len(keys); aborts_if !(length <= MAX_PROPERTY_MAP_SIZE); - aborts_if !(length == vector::length(values)); - aborts_if !(length == vector::length(types)); + aborts_if !(length == len(values)); + aborts_if !(length == len(types)); } spec new_with_key_and_property_value ( @@ -27,7 +27,7 @@ spec aptos_token::property_map { ): PropertyMap { // TODO: Can't handle abort in loop. 
pragma aborts_if_is_partial; - let length = vector::length(keys); + let length = len(keys); aborts_if !(length <= MAX_PROPERTY_MAP_SIZE); aborts_if !(length == len(values)); } @@ -36,135 +36,135 @@ spec aptos_token::property_map { aborts_if false; } - spec contains_key(map: &PropertyMap, key: &String): bool { + spec contains_key(self: &PropertyMap, key: &String): bool { aborts_if false; } - spec add(map: &mut PropertyMap, key: String, value: PropertyValue) { + spec add(self: &mut PropertyMap, key: String, value: PropertyValue) { use aptos_framework::simple_map; - aborts_if !(string::length(key) <= MAX_PROPERTY_NAME_LENGTH); - aborts_if !(!simple_map::spec_contains_key(map.map, key)); - aborts_if !(simple_map::spec_len(map.map) < MAX_PROPERTY_MAP_SIZE); + aborts_if !(key.length() <= MAX_PROPERTY_NAME_LENGTH); + aborts_if !(!simple_map::spec_contains_key(self.map, key)); + aborts_if !(simple_map::spec_len(self.map) < MAX_PROPERTY_MAP_SIZE); } - spec length(map: &PropertyMap): u64 { + spec length(self: &PropertyMap): u64 { aborts_if false; } - spec keys(map: &PropertyMap): vector { + spec keys(self: &PropertyMap): vector { pragma verify = false; } - spec types(map: &PropertyMap): vector { + spec types(self: &PropertyMap): vector { pragma verify = false; } - spec values(map: &PropertyMap): vector> { + spec values(self: &PropertyMap): vector> { pragma verify = false; } - spec borrow(map: &PropertyMap, key: &String): &PropertyValue { + spec borrow(self: &PropertyMap, key: &String): &PropertyValue { use aptos_framework::simple_map; - aborts_if !simple_map::spec_contains_key(map.map, key); + aborts_if !simple_map::spec_contains_key(self.map, key); } /// Check utf8 for correctness and whether equal /// to `prop.type` - spec read_string(map: &PropertyMap, key: &String): String { + spec read_string(self: &PropertyMap, key: &String): String { use std::string; use aptos_framework::simple_map; pragma aborts_if_is_partial; // TODO: Unable to handle abort from `from_bcs::to_string` because there is a function call at assert. 
- aborts_if !simple_map::spec_contains_key(map.map, key); + aborts_if !simple_map::spec_contains_key(self.map, key); aborts_if !string::spec_internal_check_utf8(b"0x1::string::String"); - let prop = simple_map::spec_get(map.map, key); + let prop = simple_map::spec_get(self.map, key); aborts_if prop.type != spec_utf8(b"0x1::string::String"); aborts_if !aptos_std::from_bcs::deserializable(prop.value); } spec fun spec_utf8(bytes: vector): String { - String{bytes} + String { bytes } } - spec read_u8(map: &PropertyMap, key: &String): u8 { + spec read_u8(self: &PropertyMap, key: &String): u8 { use std::string; use aptos_framework::simple_map; let str = b"u8"; - aborts_if !simple_map::spec_contains_key(map.map, key); + aborts_if !simple_map::spec_contains_key(self.map, key); aborts_if !string::spec_internal_check_utf8(str); - let prop = simple_map::spec_get(map.map, key); + let prop = simple_map::spec_get(self.map, key); aborts_if prop.type != spec_utf8(str); aborts_if !aptos_std::from_bcs::deserializable(prop.value); } - spec read_u64(map: &PropertyMap, key: &String): u64 { + spec read_u64(self: &PropertyMap, key: &String): u64 { use std::string; use aptos_framework::simple_map; let str = b"u64"; - aborts_if !simple_map::spec_contains_key(map.map, key); + aborts_if !simple_map::spec_contains_key(self.map, key); aborts_if !string::spec_internal_check_utf8(str); - let prop = simple_map::spec_get(map.map, key); + let prop = simple_map::spec_get(self.map, key); aborts_if prop.type != spec_utf8(str); aborts_if !aptos_std::from_bcs::deserializable(prop.value); } - spec read_address(map: &PropertyMap, key: &String): address { + spec read_address(self: &PropertyMap, key: &String): address { use std::string; use aptos_framework::simple_map; let str = b"address"; - aborts_if !simple_map::spec_contains_key(map.map, key); + aborts_if !simple_map::spec_contains_key(self.map, key); aborts_if !string::spec_internal_check_utf8(str); - let prop = simple_map::spec_get(map.map, key); + let prop = simple_map::spec_get(self.map, key); aborts_if prop.type != spec_utf8(str); aborts_if !aptos_std::from_bcs::deserializable
(prop.value); } - spec read_u128(map: &PropertyMap, key: &String): u128 { + spec read_u128(self: &PropertyMap, key: &String): u128 { use std::string; use aptos_framework::simple_map; let str = b"u128"; - aborts_if !simple_map::spec_contains_key(map.map, key); + aborts_if !simple_map::spec_contains_key(self.map, key); aborts_if !string::spec_internal_check_utf8(str); - let prop = simple_map::spec_get(map.map, key); + let prop = simple_map::spec_get(self.map, key); aborts_if prop.type != spec_utf8(str); aborts_if !aptos_std::from_bcs::deserializable(prop.value); } - spec read_bool(map: &PropertyMap, key: &String): bool { + spec read_bool(self: &PropertyMap, key: &String): bool { use std::string; use aptos_framework::simple_map; let str = b"bool"; - aborts_if !simple_map::spec_contains_key(map.map, key); + aborts_if !simple_map::spec_contains_key(self.map, key); aborts_if !string::spec_internal_check_utf8(str); - let prop = simple_map::spec_get(map.map, key); + let prop = simple_map::spec_get(self.map, key); aborts_if prop.type != spec_utf8(str); aborts_if !aptos_std::from_bcs::deserializable(prop.value); } - spec borrow_value(property: &PropertyValue): vector { + spec borrow_value(self: &PropertyValue): vector { aborts_if false; } - spec borrow_type(property: &PropertyValue): String { + spec borrow_type(self: &PropertyValue): String { aborts_if false; } spec remove ( - map: &mut PropertyMap, + self: &mut PropertyMap, key: &String ): (String, PropertyValue) { - aborts_if !simple_map::spec_contains_key(map.map, key); + aborts_if !simple_map::spec_contains_key(self.map, key); } spec update_property_map ( - map: &mut PropertyMap, + self: &mut PropertyMap, keys: vector, values: vector>, types: vector, @@ -179,11 +179,11 @@ spec aptos_token::property_map { } spec update_property_value ( - map: &mut PropertyMap, + self: &mut PropertyMap, key: &String, value: PropertyValue ) { - aborts_if !simple_map::spec_contains_key(map.map, key); + aborts_if !simple_map::spec_contains_key(self.map, key); } spec create_property_value_raw ( diff --git a/aptos-move/framework/aptos-token/sources/token.move b/aptos-move/framework/aptos-token/sources/token.move index d11f69a379351..063865751cd47 100644 --- a/aptos-move/framework/aptos-token/sources/token.move +++ b/aptos-move/framework/aptos-token/sources/token.move @@ -289,6 +289,15 @@ module aptos_token::token { amount: u64, } + #[event] + /// Set of data sent to the event stream during a receive + struct TokenDeposit has drop, store { + account: address, + id: TokenId, + amount: u64, + } + + #[deprecated] #[event] /// Set of data sent to the event stream during a receive struct Deposit has drop, store { @@ -302,6 +311,7 @@ module aptos_token::token { amount: u64, } + #[deprecated] #[event] /// Set of data sent to the event stream during a withdrawal struct Withdraw has drop, store { @@ -309,6 +319,14 @@ module aptos_token::token { amount: u64, } + #[event] + /// Set of data sent to the event stream during a withdrawal + struct TokenWithdraw has drop, store { + account: address, + id: TokenId, + amount: u64, + } + /// token creation event id of token created struct CreateTokenDataEvent has drop, store { id: TokenDataId, @@ -325,6 +343,7 @@ module aptos_token::token { property_types: vector, } + #[deprecated] #[event] struct CreateTokenData has drop, store { id: TokenDataId, @@ -341,30 +360,64 @@ module aptos_token::token { property_types: vector, } + #[event] + struct TokenDataCreation has drop, store { + creator: address, + id: TokenDataId, + description: String, + 
maximum: u64, + uri: String, + royalty_payee_address: address, + royalty_points_denominator: u64, + royalty_points_numerator: u64, + name: String, + mutability_config: TokenMutabilityConfig, + property_keys: vector, + property_values: vector>, + property_types: vector, + } + /// mint token event. This event triggered when creator adds more supply to existing token struct MintTokenEvent has drop, store { id: TokenDataId, amount: u64, } + #[deprecated] #[event] struct MintToken has drop, store { id: TokenDataId, amount: u64, } + #[event] + struct Mint has drop, store { + creator: address, + id: TokenDataId, + amount: u64, + } + /// struct BurnTokenEvent has drop, store { id: TokenId, amount: u64, } + #[deprecated] #[event] struct BurnToken has drop, store { id: TokenId, amount: u64, } + #[event] + struct Burn has drop, store { + account: address, + id: TokenId, + amount: u64, + } + + /// struct MutateTokenPropertyMapEvent has drop, store { old_id: TokenId, @@ -374,6 +427,7 @@ module aptos_token::token { types: vector, } + #[deprecated] #[event] struct MutateTokenPropertyMap has drop, store { old_id: TokenId, @@ -383,6 +437,16 @@ module aptos_token::token { types: vector, } + #[event] + struct MutatePropertyMap has drop, store { + account: address, + old_id: TokenId, + new_id: TokenId, + keys: vector, + values: vector>, + types: vector, + } + /// create collection event with creator address and collection name struct CreateCollectionEvent has drop, store { creator: address, @@ -503,7 +567,6 @@ module aptos_token::token { types: vector, ) acquires Collections, TokenStore { assert!(signer::address_of(account) == creator, error::not_found(ENO_MUTATE_CAPABILITY)); - let i = 0; let token_id = create_token_id_raw( creator, collection_name, @@ -511,9 +574,8 @@ module aptos_token::token { token_property_version, ); // give a new property_version for each token - while (i < amount) { + for (i in 0..amount) { mutate_one_token(account, token_owner, token_id, keys, values, types); - i = i + 1; }; } @@ -537,7 +599,7 @@ module aptos_token::token { public entry fun opt_in_direct_transfer(account: &signer, opt_in: bool) acquires TokenStore { let addr = signer::address_of(account); initialize_token_store(account); - let opt_in_flag = &mut borrow_global_mut(addr).direct_transfer; + let opt_in_flag = &mut TokenStore[addr].direct_transfer; *opt_in_flag = opt_in; token_event_store::emit_token_opt_in_event(account, opt_in); } @@ -576,54 +638,49 @@ module aptos_token::token { error::not_found(ECOLLECTIONS_NOT_PUBLISHED), ); - let collections = borrow_global_mut(creator_address); + let collections = &mut Collections[creator_address]; assert!( - table::contains(&collections.token_data, token_id.token_data_id), + collections.token_data.contains(token_id.token_data_id), error::not_found(ETOKEN_DATA_NOT_PUBLISHED), ); - let token_data = table::borrow_mut( - &mut collections.token_data, - token_id.token_data_id, - ); + let token_data = collections.token_data.borrow_mut(token_id.token_data_id); // The property should be explicitly set in the property_map for creator to burn the token assert!( - property_map::contains_key(&token_data.default_properties, &string::utf8(BURNABLE_BY_CREATOR)), + token_data.default_properties.contains_key(&string::utf8(BURNABLE_BY_CREATOR)), error::permission_denied(ECREATOR_CANNOT_BURN_TOKEN) ); - let burn_by_creator_flag = property_map::read_bool(&token_data.default_properties, &string::utf8(BURNABLE_BY_CREATOR)); + let burn_by_creator_flag = 
token_data.default_properties.read_bool(&string::utf8(BURNABLE_BY_CREATOR)); assert!(burn_by_creator_flag, error::permission_denied(ECREATOR_CANNOT_BURN_TOKEN)); // Burn the tokens. let Token { id: _, amount: burned_amount, token_properties: _ } = withdraw_with_event_internal(owner, token_id, amount); - let token_store = borrow_global_mut(owner); + let token_store = &mut TokenStore[owner]; if (std::features::module_event_migration_enabled()) { - event::emit(BurnToken { id: token_id, amount: burned_amount }); + event::emit(Burn { account: owner, id: token_id, amount: burned_amount }); + } else { + event::emit_event( + &mut token_store.burn_events, + BurnTokenEvent { id: token_id, amount: burned_amount } + ); }; - event::emit_event( - &mut token_store.burn_events, - BurnTokenEvent { id: token_id, amount: burned_amount } - ); if (token_data.maximum > 0) { - token_data.supply = token_data.supply - burned_amount; + token_data.supply -= burned_amount; // Delete the token_data if supply drops to 0. if (token_data.supply == 0) { - destroy_token_data(table::remove(&mut collections.token_data, token_id.token_data_id)); + destroy_token_data(collections.token_data.remove(token_id.token_data_id)); // update the collection supply - let collection_data = table::borrow_mut( - &mut collections.collection_data, - token_id.token_data_id.collection - ); + let collection_data = collections.collection_data.borrow_mut(token_id.token_data_id.collection); if (collection_data.maximum > 0) { - collection_data.supply = collection_data.supply - 1; + collection_data.supply -= 1; // delete the collection data if the collection supply equals 0 if (collection_data.supply == 0) { - destroy_collection_data(table::remove(&mut collections.collection_data, collection_data.name)); + destroy_collection_data(collections.collection_data.remove(collection_data.name)); }; }; }; @@ -647,62 +704,54 @@ module aptos_token::token { error::not_found(ECOLLECTIONS_NOT_PUBLISHED), ); - let collections = borrow_global_mut(creator_addr); + let collections = &mut Collections[creator_addr]; assert!( - table::contains(&collections.token_data, token_id.token_data_id), + collections.token_data.contains(token_id.token_data_id), error::not_found(ETOKEN_DATA_NOT_PUBLISHED), ); - let token_data = table::borrow_mut( - &mut collections.token_data, - token_id.token_data_id, - ); + let token_data = collections.token_data.borrow_mut(token_id.token_data_id); assert!( - property_map::contains_key(&token_data.default_properties, &string::utf8(BURNABLE_BY_OWNER)), + token_data.default_properties.contains_key(&string::utf8(BURNABLE_BY_OWNER)), error::permission_denied(EOWNER_CANNOT_BURN_TOKEN) ); - let burn_by_owner_flag = property_map::read_bool(&token_data.default_properties, &string::utf8(BURNABLE_BY_OWNER)); + let burn_by_owner_flag = token_data.default_properties.read_bool(&string::utf8(BURNABLE_BY_OWNER)); assert!(burn_by_owner_flag, error::permission_denied(EOWNER_CANNOT_BURN_TOKEN)); // Burn the tokens. 
let Token { id: _, amount: burned_amount, token_properties: _ } = withdraw_token(owner, token_id, amount); - let token_store = borrow_global_mut(signer::address_of(owner)); + let token_store = &mut TokenStore[signer::address_of(owner)]; if (std::features::module_event_migration_enabled()) { - event::emit(BurnToken { id: token_id, amount: burned_amount }); + event::emit(Burn { account: signer::address_of(owner), id: token_id, amount: burned_amount }); + } else { + event::emit_event( + &mut token_store.burn_events, + BurnTokenEvent { id: token_id, amount: burned_amount } + ); }; - event::emit_event( - &mut token_store.burn_events, - BurnTokenEvent { id: token_id, amount: burned_amount } - ); // Decrease the supply correspondingly by the amount of tokens burned. - let token_data = table::borrow_mut( - &mut collections.token_data, - token_id.token_data_id, - ); + let token_data = collections.token_data.borrow_mut(token_id.token_data_id); // only update the supply if we tracking the supply and maximal // maximal == 0 is reserved for unlimited token and collection with no tracking info. if (token_data.maximum > 0) { - token_data.supply = token_data.supply - burned_amount; + token_data.supply -= burned_amount; // Delete the token_data if supply drops to 0. if (token_data.supply == 0) { - destroy_token_data(table::remove(&mut collections.token_data, token_id.token_data_id)); + destroy_token_data(collections.token_data.remove(token_id.token_data_id)); // update the collection supply - let collection_data = table::borrow_mut( - &mut collections.collection_data, - token_id.token_data_id.collection - ); + let collection_data = collections.collection_data.borrow_mut(token_id.token_data_id.collection); // only update and check the supply for unlimited collection if (collection_data.maximum > 0){ - collection_data.supply = collection_data.supply - 1; + collection_data.supply -= 1; // delete the collection data if the collection supply equals 0 if (collection_data.supply == 0) { - destroy_collection_data(table::remove(&mut collections.collection_data, collection_data.name)); + destroy_collection_data(collections.collection_data.remove(collection_data.name)); }; }; }; @@ -717,17 +766,21 @@ module aptos_token::token { public fun mutate_collection_description(creator: &signer, collection_name: String, description: String) acquires Collections { let creator_address = signer::address_of(creator); assert_collection_exists(creator_address, collection_name); - let collection_data = table::borrow_mut(&mut borrow_global_mut(creator_address).collection_data, collection_name); + let collection_data = Collections[creator_address].collection_data.borrow_mut( + collection_name + ); assert!(collection_data.mutability_config.description, error::permission_denied(EFIELD_NOT_MUTABLE)); token_event_store::emit_collection_description_mutate_event(creator, collection_name, collection_data.description, description); collection_data.description = description; } public fun mutate_collection_uri(creator: &signer, collection_name: String, uri: String) acquires Collections { - assert!(string::length(&uri) <= MAX_URI_LENGTH, error::invalid_argument(EURI_TOO_LONG)); + assert!(uri.length() <= MAX_URI_LENGTH, error::invalid_argument(EURI_TOO_LONG)); let creator_address = signer::address_of(creator); assert_collection_exists(creator_address, collection_name); - let collection_data = table::borrow_mut(&mut borrow_global_mut(creator_address).collection_data, collection_name); + let collection_data = 
Collections[creator_address].collection_data.borrow_mut( + collection_name + ); assert!(collection_data.mutability_config.uri, error::permission_denied(EFIELD_NOT_MUTABLE)); token_event_store::emit_collection_uri_mutate_event(creator, collection_name, collection_data.uri , uri); collection_data.uri = uri; @@ -736,7 +789,9 @@ module aptos_token::token { public fun mutate_collection_maximum(creator: &signer, collection_name: String, maximum: u64) acquires Collections { let creator_address = signer::address_of(creator); assert_collection_exists(creator_address, collection_name); - let collection_data = table::borrow_mut(&mut borrow_global_mut(creator_address).collection_data, collection_name); + let collection_data = Collections[creator_address].collection_data.borrow_mut( + collection_name + ); // cannot change maximum from 0 and cannot change maximum to 0 assert!(collection_data.maximum != 0 && maximum != 0, error::invalid_argument(EINVALID_MAXIMUM)); assert!(maximum >= collection_data.supply, error::invalid_argument(EINVALID_MAXIMUM)); @@ -748,8 +803,8 @@ module aptos_token::token { // Functions for mutating TokenData fields public fun mutate_tokendata_maximum(creator: &signer, token_data_id: TokenDataId, maximum: u64) acquires Collections { assert_tokendata_exists(creator, token_data_id); - let all_token_data = &mut borrow_global_mut(token_data_id.creator).token_data; - let token_data = table::borrow_mut(all_token_data, token_data_id); + let all_token_data = &mut Collections[token_data_id.creator].token_data; + let token_data = all_token_data.borrow_mut(token_data_id); // cannot change maximum from 0 and cannot change maximum to 0 assert!(token_data.maximum != 0 && maximum != 0, error::invalid_argument(EINVALID_MAXIMUM)); assert!(maximum >= token_data.supply, error::invalid_argument(EINVALID_MAXIMUM)); @@ -763,11 +818,11 @@ module aptos_token::token { token_data_id: TokenDataId, uri: String ) acquires Collections { - assert!(string::length(&uri) <= MAX_URI_LENGTH, error::invalid_argument(EURI_TOO_LONG)); + assert!(uri.length() <= MAX_URI_LENGTH, error::invalid_argument(EURI_TOO_LONG)); assert_tokendata_exists(creator, token_data_id); - let all_token_data = &mut borrow_global_mut(token_data_id.creator).token_data; - let token_data = table::borrow_mut(all_token_data, token_data_id); + let all_token_data = &mut Collections[token_data_id.creator].token_data; + let token_data = all_token_data.borrow_mut(token_data_id); assert!(token_data.mutability_config.uri, error::permission_denied(EFIELD_NOT_MUTABLE)); token_event_store::emit_token_uri_mutate_event(creator, token_data_id.collection, token_data_id.name, token_data.uri ,uri); token_data.uri = uri; @@ -776,8 +831,8 @@ module aptos_token::token { public fun mutate_tokendata_royalty(creator: &signer, token_data_id: TokenDataId, royalty: Royalty) acquires Collections { assert_tokendata_exists(creator, token_data_id); - let all_token_data = &mut borrow_global_mut(token_data_id.creator).token_data; - let token_data = table::borrow_mut(all_token_data, token_data_id); + let all_token_data = &mut Collections[token_data_id.creator].token_data; + let token_data = all_token_data.borrow_mut(token_data_id); assert!(token_data.mutability_config.royalty, error::permission_denied(EFIELD_NOT_MUTABLE)); token_event_store::emit_token_royalty_mutate_event( @@ -797,8 +852,8 @@ module aptos_token::token { public fun mutate_tokendata_description(creator: &signer, token_data_id: TokenDataId, description: String) acquires Collections { 
assert_tokendata_exists(creator, token_data_id); - let all_token_data = &mut borrow_global_mut(token_data_id.creator).token_data; - let token_data = table::borrow_mut(all_token_data, token_data_id); + let all_token_data = &mut Collections[token_data_id.creator].token_data; + let token_data = all_token_data.borrow_mut(token_data_id); assert!(token_data.mutability_config.description, error::permission_denied(EFIELD_NOT_MUTABLE)); token_event_store::emit_token_descrition_mutate_event(creator, token_data_id.collection, token_data_id.name, token_data.description, description); token_data.description = description; @@ -813,35 +868,33 @@ module aptos_token::token { types: vector, ) acquires Collections { assert_tokendata_exists(creator, token_data_id); - let key_len = vector::length(&keys); - let val_len = vector::length(&values); - let typ_len = vector::length(&types); + let key_len = keys.length(); + let val_len = values.length(); + let typ_len = types.length(); assert!(key_len == val_len, error::invalid_state(ETOKEN_PROPERTIES_COUNT_NOT_MATCH)); assert!(key_len == typ_len, error::invalid_state(ETOKEN_PROPERTIES_COUNT_NOT_MATCH)); - let all_token_data = &mut borrow_global_mut(token_data_id.creator).token_data; - let token_data = table::borrow_mut(all_token_data, token_data_id); + let all_token_data = &mut Collections[token_data_id.creator].token_data; + let token_data = all_token_data.borrow_mut(token_data_id); assert!(token_data.mutability_config.properties, error::permission_denied(EFIELD_NOT_MUTABLE)); - let i: u64 = 0; let old_values: vector> = vector::empty(); let new_values: vector = vector::empty(); assert_non_standard_reserved_property(&keys); - while (i < vector::length(&keys)){ - let key = vector::borrow(&keys, i); - let old_pv = if (property_map::contains_key(&token_data.default_properties, key)) { - option::some(*property_map::borrow(&token_data.default_properties, key)) + for (i in 0..keys.length()){ + let key = keys.borrow(i); + let old_pv = if (token_data.default_properties.contains_key(key)) { + option::some(*token_data.default_properties.borrow(key)) } else { option::none() }; - vector::push_back(&mut old_values, old_pv); - let new_pv = property_map::create_property_value_raw(*vector::borrow(&values, i), *vector::borrow(&types, i)); - vector::push_back(&mut new_values, new_pv); - if (option::is_some(&old_pv)) { - property_map::update_property_value(&mut token_data.default_properties, key, new_pv); + old_values.push_back(old_pv); + let new_pv = property_map::create_property_value_raw(values[i], types[i]); + new_values.push_back(new_pv); + if (old_pv.is_some()) { + token_data.default_properties.update_property_value(key, new_pv); } else { - property_map::add(&mut token_data.default_properties, *key, new_pv); + token_data.default_properties.add(*key, new_pv); }; - i = i + 1; }; token_event_store::emit_default_property_mutate_event(creator, token_data_id.collection, token_data_id.name, keys, old_values, new_values); } @@ -859,22 +912,22 @@ module aptos_token::token { assert!(signer::address_of(account) == creator, error::permission_denied(ENO_MUTATE_CAPABILITY)); // validate if the properties is mutable assert!(exists(creator), error::not_found(ECOLLECTIONS_NOT_PUBLISHED)); - let all_token_data = &mut borrow_global_mut( + let all_token_data = &mut Collections[ creator - ).token_data; + ].token_data; - assert!(table::contains(all_token_data, token_id.token_data_id), error::not_found(ETOKEN_DATA_NOT_PUBLISHED)); - let token_data = table::borrow_mut(all_token_data, 
token_id.token_data_id); + assert!(all_token_data.contains(token_id.token_data_id), error::not_found(ETOKEN_DATA_NOT_PUBLISHED)); + let token_data = all_token_data.borrow_mut(token_id.token_data_id); - // if default property is mutatable, token property is alwasy mutable + // if default property is mutatable, token property is always mutable // we only need to check TOKEN_PROPERTY_MUTABLE when default property is immutable if (!token_data.mutability_config.properties) { assert!( - property_map::contains_key(&token_data.default_properties, &string::utf8(TOKEN_PROPERTY_MUTABLE)), + token_data.default_properties.contains_key(&string::utf8(TOKEN_PROPERTY_MUTABLE)), error::permission_denied(EFIELD_NOT_MUTABLE) ); - let token_prop_mutable = property_map::read_bool(&token_data.default_properties, &string::utf8(TOKEN_PROPERTY_MUTABLE)); + let token_prop_mutable = token_data.default_properties.read_bool(&string::utf8(TOKEN_PROPERTY_MUTABLE)); assert!(token_prop_mutable, error::permission_denied(EFIELD_NOT_MUTABLE)); }; @@ -892,24 +945,26 @@ module aptos_token::token { direct_deposit(token_owner, new_token); update_token_property_internal(token_owner, new_token_id, keys, values, types); if (std::features::module_event_migration_enabled()) { - event::emit(MutateTokenPropertyMap { + event::emit(MutatePropertyMap { + account: token_owner, old_id: token_id, new_id: new_token_id, keys, values, types }); + } else { + event::emit_event( + &mut TokenStore[token_owner].mutate_token_property_events, + MutateTokenPropertyMapEvent { + old_id: token_id, + new_id: new_token_id, + keys, + values, + types + }, + ); }; - event::emit_event( - &mut borrow_global_mut(token_owner).mutate_token_property_events, - MutateTokenPropertyMapEvent { - old_id: token_id, - new_id: new_token_id, - keys, - values, - types - }, - ); token_data.largest_property_version = cur_property_version; // burn the orignial property_version 0 token after mutation @@ -919,31 +974,34 @@ module aptos_token::token { // only 1 copy for the token with property verion bigger than 0 update_token_property_internal(token_owner, token_id, keys, values, types); if (std::features::module_event_migration_enabled()) { - event::emit(MutateTokenPropertyMap { + event::emit(MutatePropertyMap { + account: token_owner, old_id: token_id, new_id: token_id, keys, values, types }); + } else { + event::emit_event( + &mut TokenStore[token_owner].mutate_token_property_events, + MutateTokenPropertyMapEvent { + old_id: token_id, + new_id: token_id, + keys, + values, + types + }, + ); }; - event::emit_event( - &mut borrow_global_mut(token_owner).mutate_token_property_events, - MutateTokenPropertyMapEvent { - old_id: token_id, - new_id: token_id, - keys, - values, - types - }, - ); token_id } } public fun create_royalty(royalty_points_numerator: u64, royalty_points_denominator: u64, payee_address: address): Royalty { assert!(royalty_points_numerator <= royalty_points_denominator, error::invalid_argument(EINVALID_ROYALTY_NUMERATOR_DENOMINATOR)); - assert!(account::exists_at(payee_address), error::invalid_argument(EROYALTY_PAYEE_ACCOUNT_DOES_NOT_EXIST)); + // Question[Orderless]: Is it okay to remove this check to accommodate stateless accounts? 
+ // assert!(account::exists_at(payee_address), error::invalid_argument(EROYALTY_PAYEE_ACCOUNT_DOES_NOT_EXIST)); Royalty { royalty_points_numerator, royalty_points_denominator, @@ -960,7 +1018,7 @@ module aptos_token::token { /// direct deposit if user opt in direct transfer public fun direct_deposit_with_opt_in(account_addr: address, token: Token) acquires TokenStore { - let opt_in_transfer = borrow_global(account_addr).direct_transfer; + let opt_in_transfer = TokenStore[account_addr].direct_transfer; assert!(opt_in_transfer, error::permission_denied(EUSER_NOT_OPT_IN_DIRECT_TRANSFER)); direct_deposit(account_addr, token); } @@ -993,7 +1051,7 @@ module aptos_token::token { public fun merge(dst_token: &mut Token, source_token: Token) { assert!(&dst_token.id == &source_token.id, error::invalid_argument(EINVALID_TOKEN_MERGE)); - dst_token.amount = dst_token.amount + source_token.amount; + dst_token.amount += source_token.amount; let Token { id: _, amount: _, token_properties: _ } = source_token; } @@ -1001,7 +1059,7 @@ module aptos_token::token { assert!(dst_token.id.property_version == 0, error::invalid_state(ENFT_NOT_SPLITABLE)); assert!(dst_token.amount > amount, error::invalid_argument(ETOKEN_SPLIT_AMOUNT_LARGER_OR_EQUAL_TO_TOKEN_AMOUNT)); assert!(amount > 0, error::invalid_argument(ETOKEN_CANNOT_HAVE_ZERO_AMOUNT)); - dst_token.amount = dst_token.amount - amount; + dst_token.amount -= amount; Token { id: dst_token.id, amount, @@ -1020,7 +1078,7 @@ module aptos_token::token { to: address, amount: u64, ) acquires TokenStore { - let opt_in_transfer = borrow_global(to).direct_transfer; + let opt_in_transfer = TokenStore[to].direct_transfer; assert!(opt_in_transfer, error::permission_denied(EUSER_NOT_OPT_IN_DIRECT_TRANSFER)); let token = withdraw_token(from, id, amount); direct_deposit(to, token); @@ -1108,8 +1166,8 @@ module aptos_token::token { maximum: u64, mutate_setting: vector ) acquires Collections { - assert!(string::length(&name) <= MAX_COLLECTION_NAME_LENGTH, error::invalid_argument(ECOLLECTION_NAME_TOO_LONG)); - assert!(string::length(&uri) <= MAX_URI_LENGTH, error::invalid_argument(EURI_TOO_LONG)); + assert!(name.length() <= MAX_COLLECTION_NAME_LENGTH, error::invalid_argument(ECOLLECTION_NAME_TOO_LONG)); + assert!(uri.length() <= MAX_URI_LENGTH, error::invalid_argument(EURI_TOO_LONG)); let account_addr = signer::address_of(creator); if (!exists(account_addr)) { move_to( @@ -1124,25 +1182,25 @@ module aptos_token::token { ) }; - let collection_data = &mut borrow_global_mut(account_addr).collection_data; + let collection_data = &mut Collections[account_addr].collection_data; assert!( - !table::contains(collection_data, name), + !collection_data.contains(name), error::already_exists(ECOLLECTION_ALREADY_EXISTS), ); let mutability_config = create_collection_mutability_config(&mutate_setting); let collection = CollectionData { description, - name: name, + name, uri, supply: 0, maximum, mutability_config }; - table::add(collection_data, name, collection); - let collection_handle = borrow_global_mut(account_addr); + collection_data.add(name, collection); + let collection_handle = &mut Collections[account_addr]; if (std::features::module_event_migration_enabled()) { event::emit( CreateCollection { @@ -1153,17 +1211,18 @@ module aptos_token::token { maximum, } ); + } else { + event::emit_event( + &mut collection_handle.create_collection_events, + CreateCollectionEvent { + creator: account_addr, + collection_name: name, + uri, + description, + maximum, + } + ); }; - event::emit_event( - &mut 
collection_handle.create_collection_events, - CreateCollectionEvent { - creator: account_addr, - collection_name: name, - uri, - description, - maximum, - } - ); } public fun check_collection_exists(creator: address, name: String): bool acquires Collections { @@ -1172,8 +1231,8 @@ module aptos_token::token { error::not_found(ECOLLECTIONS_NOT_PUBLISHED), ); - let collection_data = &borrow_global(creator).collection_data; - table::contains(collection_data, name) + let collection_data = &Collections[creator].collection_data; + collection_data.contains(name) } public fun check_tokendata_exists(creator: address, collection_name: String, token_name: String): bool acquires Collections { @@ -1182,9 +1241,9 @@ module aptos_token::token { error::not_found(ECOLLECTIONS_NOT_PUBLISHED), ); - let token_data = &borrow_global(creator).token_data; + let token_data = &Collections[creator].token_data; let token_data_id = create_token_data_id(creator, collection_name, token_name); - table::contains(token_data, token_data_id) + token_data.contains(token_data_id) } public fun create_tokendata( @@ -1202,9 +1261,9 @@ module aptos_token::token { property_values: vector>, property_types: vector ): TokenDataId acquires Collections { - assert!(string::length(&name) <= MAX_NFT_NAME_LENGTH, error::invalid_argument(ENFT_NAME_TOO_LONG)); - assert!(string::length(&collection) <= MAX_COLLECTION_NAME_LENGTH, error::invalid_argument(ECOLLECTION_NAME_TOO_LONG)); - assert!(string::length(&uri) <= MAX_URI_LENGTH, error::invalid_argument(EURI_TOO_LONG)); + assert!(name.length() <= MAX_NFT_NAME_LENGTH, error::invalid_argument(ENFT_NAME_TOO_LONG)); + assert!(collection.length() <= MAX_COLLECTION_NAME_LENGTH, error::invalid_argument(ECOLLECTION_NAME_TOO_LONG)); + assert!(uri.length() <= MAX_URI_LENGTH, error::invalid_argument(EURI_TOO_LONG)); assert!(royalty_points_numerator <= royalty_points_denominator, error::invalid_argument(EINVALID_ROYALTY_NUMERATOR_DENOMINATOR)); let account_addr = signer::address_of(account); @@ -1212,25 +1271,25 @@ module aptos_token::token { exists(account_addr), error::not_found(ECOLLECTIONS_NOT_PUBLISHED), ); - let collections = borrow_global_mut(account_addr); + let collections = &mut Collections[account_addr]; let token_data_id = create_token_data_id(account_addr, collection, name); assert!( - table::contains(&collections.collection_data, token_data_id.collection), + collections.collection_data.contains(token_data_id.collection), error::not_found(ECOLLECTION_NOT_PUBLISHED), ); assert!( - !table::contains(&collections.token_data, token_data_id), + !collections.token_data.contains(token_data_id), error::already_exists(ETOKEN_DATA_ALREADY_EXISTS), ); - let collection = table::borrow_mut(&mut collections.collection_data, token_data_id.collection); + let collection = collections.collection_data.borrow_mut(token_data_id.collection); // if collection maximum == 0, user don't want to enforce supply constraint. 
// we don't track supply to make token creation parallelizable if (collection.maximum > 0) { - collection.supply = collection.supply + 1; + collection.supply += 1; assert!( collection.maximum >= collection.supply, error::invalid_argument(ECREATE_WOULD_EXCEED_COLLECTION_MAXIMUM), @@ -1249,10 +1308,11 @@ module aptos_token::token { mutability_config: token_mutate_config, }; - table::add(&mut collections.token_data, token_data_id, token_data); + collections.token_data.add(token_data_id, token_data); if (std::features::module_event_migration_enabled()) { event::emit( - CreateTokenData { + TokenDataCreation { + creator: account_addr, id: token_data_id, description, maximum, @@ -1267,32 +1327,35 @@ module aptos_token::token { property_types, } ); + } else { + event::emit_event( + &mut collections.create_token_data_events, + CreateTokenDataEvent { + id: token_data_id, + description, + maximum, + uri, + royalty_payee_address, + royalty_points_denominator, + royalty_points_numerator, + name, + mutability_config: token_mutate_config, + property_keys, + property_values, + property_types, + }, + ); }; - event::emit_event( - &mut collections.create_token_data_events, - CreateTokenDataEvent { - id: token_data_id, - description, - maximum, - uri, - royalty_payee_address, - royalty_points_denominator, - royalty_points_numerator, - name, - mutability_config: token_mutate_config, - property_keys, - property_values, - property_types, - }, - ); token_data_id } /// return the number of distinct token_data_id created under this collection public fun get_collection_supply(creator_address: address, collection_name: String): Option acquires Collections { assert_collection_exists(creator_address, collection_name); - let collection_data = table::borrow_mut(&mut borrow_global_mut(creator_address).collection_data, collection_name); + let collection_data = Collections[creator_address].collection_data.borrow_mut( + collection_name + ); if (collection_data.maximum > 0) { option::some(collection_data.supply) @@ -1303,28 +1366,34 @@ module aptos_token::token { public fun get_collection_description(creator_address: address, collection_name: String): String acquires Collections { assert_collection_exists(creator_address, collection_name); - let collection_data = table::borrow_mut(&mut borrow_global_mut(creator_address).collection_data, collection_name); + let collection_data = Collections[creator_address].collection_data.borrow_mut( + collection_name + ); collection_data.description } public fun get_collection_uri(creator_address: address, collection_name: String): String acquires Collections { assert_collection_exists(creator_address, collection_name); - let collection_data = table::borrow_mut(&mut borrow_global_mut(creator_address).collection_data, collection_name); + let collection_data = Collections[creator_address].collection_data.borrow_mut( + collection_name + ); collection_data.uri } public fun get_collection_maximum(creator_address: address, collection_name: String): u64 acquires Collections { assert_collection_exists(creator_address, collection_name); - let collection_data = table::borrow_mut(&mut borrow_global_mut(creator_address).collection_data, collection_name); + let collection_data = Collections[creator_address].collection_data.borrow_mut( + collection_name + ); collection_data.maximum } /// return the number of distinct token_id created under this TokenData public fun get_token_supply(creator_address: address, token_data_id: TokenDataId): Option acquires Collections { assert!(exists(creator_address), 
error::not_found(ECOLLECTIONS_NOT_PUBLISHED)); - let all_token_data = &borrow_global(creator_address).token_data; - assert!(table::contains(all_token_data, token_data_id), error::not_found(ETOKEN_DATA_NOT_PUBLISHED)); - let token_data = table::borrow(all_token_data, token_data_id); + let all_token_data = &Collections[creator_address].token_data; + assert!(all_token_data.contains(token_data_id), error::not_found(ETOKEN_DATA_NOT_PUBLISHED)); + let token_data = all_token_data.borrow(token_data_id); if (token_data.maximum > 0) { option::some(token_data.supply) @@ -1336,9 +1405,9 @@ module aptos_token::token { /// return the largest_property_version of this TokenData public fun get_tokendata_largest_property_version(creator_address: address, token_data_id: TokenDataId): u64 acquires Collections { assert!(exists(creator_address), error::not_found(ECOLLECTIONS_NOT_PUBLISHED)); - let all_token_data = &borrow_global(creator_address).token_data; - assert!(table::contains(all_token_data, token_data_id), error::not_found(ETOKEN_DATA_NOT_PUBLISHED)); - table::borrow(all_token_data, token_data_id).largest_property_version + let all_token_data = &Collections[creator_address].token_data; + assert!(all_token_data.contains(token_data_id), error::not_found(ETOKEN_DATA_NOT_PUBLISHED)); + all_token_data.borrow(token_data_id).largest_property_version } /// return the TokenId for a given Token @@ -1351,24 +1420,24 @@ module aptos_token::token { return false }; - borrow_global(receiver).direct_transfer + TokenStore[receiver].direct_transfer } public fun create_token_mutability_config(mutate_setting: &vector): TokenMutabilityConfig { TokenMutabilityConfig { - maximum: *vector::borrow(mutate_setting, TOKEN_MAX_MUTABLE_IND), - uri: *vector::borrow(mutate_setting, TOKEN_URI_MUTABLE_IND), - royalty: *vector::borrow(mutate_setting, TOKEN_ROYALTY_MUTABLE_IND), - description: *vector::borrow(mutate_setting, TOKEN_DESCRIPTION_MUTABLE_IND), - properties: *vector::borrow(mutate_setting, TOKEN_PROPERTY_MUTABLE_IND), + maximum: mutate_setting[TOKEN_MAX_MUTABLE_IND], + uri: mutate_setting[TOKEN_URI_MUTABLE_IND], + royalty: mutate_setting[TOKEN_ROYALTY_MUTABLE_IND], + description: mutate_setting[TOKEN_DESCRIPTION_MUTABLE_IND], + properties: mutate_setting[TOKEN_PROPERTY_MUTABLE_IND], } } public fun create_collection_mutability_config(mutate_setting: &vector): CollectionMutabilityConfig { CollectionMutabilityConfig { - description: *vector::borrow(mutate_setting, COLLECTION_DESCRIPTION_MUTABLE_IND), - uri: *vector::borrow(mutate_setting, COLLECTION_URI_MUTABLE_IND), - maximum: *vector::borrow(mutate_setting, COLLECTION_MAX_MUTABLE_IND), + description: mutate_setting[COLLECTION_DESCRIPTION_MUTABLE_IND], + uri: mutate_setting[COLLECTION_URI_MUTABLE_IND], + maximum: mutate_setting[COLLECTION_MAX_MUTABLE_IND], } } @@ -1379,27 +1448,28 @@ module aptos_token::token { ): TokenId acquires Collections, TokenStore { assert!(token_data_id.creator == signer::address_of(account), error::permission_denied(ENO_MINT_CAPABILITY)); let creator_addr = token_data_id.creator; - let all_token_data = &mut borrow_global_mut(creator_addr).token_data; - assert!(table::contains(all_token_data, token_data_id), error::not_found(ETOKEN_DATA_NOT_PUBLISHED)); - let token_data = table::borrow_mut(all_token_data, token_data_id); + let all_token_data = &mut Collections[creator_addr].token_data; + assert!(all_token_data.contains(token_data_id), error::not_found(ETOKEN_DATA_NOT_PUBLISHED)); + let token_data = all_token_data.borrow_mut(token_data_id); if 
(token_data.maximum > 0) { assert!(token_data.supply + amount <= token_data.maximum, error::invalid_argument(EMINT_WOULD_EXCEED_TOKEN_MAXIMUM)); - token_data.supply = token_data.supply + amount; + token_data.supply += amount; }; // we add more tokens with property_version 0 let token_id = create_token_id(token_data_id, 0); if (std::features::module_event_migration_enabled()) { - event::emit(MintToken { id: token_data_id, amount }) + event::emit(Mint { creator: creator_addr, id: token_data_id, amount }) + } else { + event::emit_event( + &mut Collections[creator_addr].mint_token_events, + MintTokenEvent { + id: token_data_id, + amount, + } + ); }; - event::emit_event( - &mut borrow_global_mut(creator_addr).mint_token_events, - MintTokenEvent { - id: token_data_id, - amount, - } - ); deposit_token(account, Token { @@ -1420,33 +1490,34 @@ module aptos_token::token { amount: u64, ) acquires Collections, TokenStore { assert!(exists(receiver), error::not_found(ETOKEN_STORE_NOT_PUBLISHED)); - let opt_in_transfer = borrow_global(receiver).direct_transfer; + let opt_in_transfer = TokenStore[receiver].direct_transfer; assert!(opt_in_transfer, error::permission_denied(EUSER_NOT_OPT_IN_DIRECT_TRANSFER)); assert!(token_data_id.creator == signer::address_of(account), error::permission_denied(ENO_MINT_CAPABILITY)); let creator_addr = token_data_id.creator; - let all_token_data = &mut borrow_global_mut(creator_addr).token_data; - assert!(table::contains(all_token_data, token_data_id), error::not_found(ETOKEN_DATA_NOT_PUBLISHED)); - let token_data = table::borrow_mut(all_token_data, token_data_id); + let all_token_data = &mut Collections[creator_addr].token_data; + assert!(all_token_data.contains(token_data_id), error::not_found(ETOKEN_DATA_NOT_PUBLISHED)); + let token_data = all_token_data.borrow_mut(token_data_id); if (token_data.maximum > 0) { assert!(token_data.supply + amount <= token_data.maximum, error::invalid_argument(EMINT_WOULD_EXCEED_TOKEN_MAXIMUM)); - token_data.supply = token_data.supply + amount; + token_data.supply += amount; }; // we add more tokens with property_version 0 let token_id = create_token_id(token_data_id, 0); if (std::features::module_event_migration_enabled()) { - event::emit(MintToken { id: token_data_id, amount }) + event::emit(Mint { creator: creator_addr, id: token_data_id, amount }) + } else { + event::emit_event( + &mut Collections[creator_addr].mint_token_events, + MintTokenEvent { + id: token_data_id, + amount, + } + ); }; - event::emit_event( - &mut borrow_global_mut(creator_addr).mint_token_events, - MintTokenEvent { - id: token_data_id, - amount, - } - ); direct_deposit(receiver, Token { @@ -1469,8 +1540,8 @@ module aptos_token::token { collection: String, name: String, ): TokenDataId { - assert!(string::length(&collection) <= MAX_COLLECTION_NAME_LENGTH, error::invalid_argument(ECOLLECTION_NAME_TOO_LONG)); - assert!(string::length(&name) <= MAX_NFT_NAME_LENGTH, error::invalid_argument(ENFT_NAME_TOO_LONG)); + assert!(collection.length() <= MAX_COLLECTION_NAME_LENGTH, error::invalid_argument(ECOLLECTION_NAME_TOO_LONG)); + assert!(name.length() <= MAX_NFT_NAME_LENGTH, error::invalid_argument(ENFT_NAME_TOO_LONG)); TokenDataId { creator, collection, name } } @@ -1490,9 +1561,9 @@ module aptos_token::token { if (!exists(owner)) { return 0 }; - let token_store = borrow_global(owner); - if (table::contains(&token_store.tokens, id)) { - table::borrow(&token_store.tokens, id).amount + let token_store = &TokenStore[owner]; + if (token_store.tokens.contains(id)) { + 
token_store.tokens.borrow(id).amount } else { 0 } @@ -1549,52 +1620,52 @@ module aptos_token::token { // if property_version = 0, return default property map if (token_id.property_version == 0) { let creator_addr = token_id.token_data_id.creator; - let all_token_data = &borrow_global(creator_addr).token_data; - assert!(table::contains(all_token_data, token_id.token_data_id), error::not_found(ETOKEN_DATA_NOT_PUBLISHED)); - let token_data = table::borrow(all_token_data, token_id.token_data_id); + let all_token_data = &Collections[creator_addr].token_data; + assert!(all_token_data.contains(token_id.token_data_id), error::not_found(ETOKEN_DATA_NOT_PUBLISHED)); + let token_data = all_token_data.borrow(token_id.token_data_id); token_data.default_properties } else { - let tokens = &borrow_global(owner).tokens; - table::borrow(tokens, token_id).token_properties + let tokens = &TokenStore[owner].tokens; + tokens.borrow(token_id).token_properties } } public fun get_tokendata_maximum(token_data_id: TokenDataId): u64 acquires Collections { let creator_address = token_data_id.creator; assert!(exists(creator_address), error::not_found(ECOLLECTIONS_NOT_PUBLISHED)); - let all_token_data = &borrow_global(creator_address).token_data; - assert!(table::contains(all_token_data, token_data_id), error::not_found(ETOKEN_DATA_NOT_PUBLISHED)); + let all_token_data = &Collections[creator_address].token_data; + assert!(all_token_data.contains(token_data_id), error::not_found(ETOKEN_DATA_NOT_PUBLISHED)); - let token_data = table::borrow(all_token_data, token_data_id); + let token_data = all_token_data.borrow(token_data_id); token_data.maximum } public fun get_tokendata_uri(creator: address, token_data_id: TokenDataId): String acquires Collections { assert!(exists(creator), error::not_found(ECOLLECTIONS_NOT_PUBLISHED)); - let all_token_data = &borrow_global(creator).token_data; - assert!(table::contains(all_token_data, token_data_id), error::not_found(ETOKEN_DATA_NOT_PUBLISHED)); + let all_token_data = &Collections[creator].token_data; + assert!(all_token_data.contains(token_data_id), error::not_found(ETOKEN_DATA_NOT_PUBLISHED)); - let token_data = table::borrow(all_token_data, token_data_id); + let token_data = all_token_data.borrow(token_data_id); token_data.uri } public fun get_tokendata_description(token_data_id: TokenDataId): String acquires Collections { let creator_address = token_data_id.creator; assert!(exists(creator_address), error::not_found(ECOLLECTIONS_NOT_PUBLISHED)); - let all_token_data = &borrow_global(creator_address).token_data; - assert!(table::contains(all_token_data, token_data_id), error::not_found(ETOKEN_DATA_NOT_PUBLISHED)); + let all_token_data = &Collections[creator_address].token_data; + assert!(all_token_data.contains(token_data_id), error::not_found(ETOKEN_DATA_NOT_PUBLISHED)); - let token_data = table::borrow(all_token_data, token_data_id); + let token_data = all_token_data.borrow(token_data_id); token_data.description } public fun get_tokendata_royalty(token_data_id: TokenDataId): Royalty acquires Collections { let creator_address = token_data_id.creator; assert!(exists(creator_address), error::not_found(ECOLLECTIONS_NOT_PUBLISHED)); - let all_token_data = &borrow_global(creator_address).token_data; - assert!(table::contains(all_token_data, token_data_id), error::not_found(ETOKEN_DATA_NOT_PUBLISHED)); + let all_token_data = &Collections[creator_address].token_data; + assert!(all_token_data.contains(token_data_id), error::not_found(ETOKEN_DATA_NOT_PUBLISHED)); - let token_data = 
table::borrow(all_token_data, token_data_id); + let token_data = all_token_data.borrow(token_data_id); token_data.royalty } @@ -1607,9 +1678,9 @@ module aptos_token::token { public fun get_tokendata_mutability_config(token_data_id: TokenDataId): TokenMutabilityConfig acquires Collections { let creator_addr = token_data_id.creator; assert!(exists(creator_addr), error::not_found(ECOLLECTIONS_NOT_PUBLISHED)); - let all_token_data = &borrow_global(creator_addr).token_data; - assert!(table::contains(all_token_data, token_data_id), error::not_found(ETOKEN_DATA_NOT_PUBLISHED)); - table::borrow(all_token_data, token_data_id).mutability_config + let all_token_data = &Collections[creator_addr].token_data; + assert!(all_token_data.contains(token_data_id), error::not_found(ETOKEN_DATA_NOT_PUBLISHED)); + all_token_data.borrow(token_data_id).mutability_config } /// return if the token's maximum is mutable @@ -1644,9 +1715,9 @@ module aptos_token::token { collection_name: String ): CollectionMutabilityConfig acquires Collections { assert!(exists(creator), error::not_found(ECOLLECTIONS_NOT_PUBLISHED)); - let all_collection_data = &borrow_global(creator).collection_data; - assert!(table::contains(all_collection_data, collection_name), error::not_found(ECOLLECTION_NOT_PUBLISHED)); - table::borrow(all_collection_data, collection_name).mutability_config + let all_collection_data = &Collections[creator].collection_data; + assert!(all_collection_data.contains(collection_name), error::not_found(ECOLLECTION_NOT_PUBLISHED)); + all_collection_data.borrow(collection_name).mutability_config } /// return if the collection description is mutable with a collection mutability config @@ -1706,26 +1777,28 @@ module aptos_token::token { error::not_found(ETOKEN_STORE_NOT_PUBLISHED), ); - let token_store = borrow_global_mut(account_addr); + let token_store = &mut TokenStore[account_addr]; if (std::features::module_event_migration_enabled()) { - event::emit(Withdraw { id, amount }) + event::emit(TokenWithdraw { account: account_addr, id, amount }) + } else { + event::emit_event( + &mut token_store.withdraw_events, + WithdrawEvent { id, amount } + ); }; - event::emit_event( - &mut token_store.withdraw_events, - WithdrawEvent { id, amount } - ); - let tokens = &mut borrow_global_mut(account_addr).tokens; + + let tokens = &mut TokenStore[account_addr].tokens; assert!( - table::contains(tokens, id), + tokens.contains(id), error::not_found(ENO_TOKEN_IN_TOKEN_STORE), ); // balance > amount and amount > 0 indirectly asserted that balance > 0. 
- let balance = &mut table::borrow_mut(tokens, id).amount; + let balance = &mut tokens.borrow_mut(id).amount; if (*balance > amount) { - *balance = *balance - amount; + *balance -= amount; Token { id, amount, token_properties: property_map::empty() } } else { - table::remove(tokens, id) + tokens.remove(id) } } @@ -1736,60 +1809,61 @@ module aptos_token::token { values: vector>, types: vector, ) acquires TokenStore { - let tokens = &mut borrow_global_mut(token_owner).tokens; - assert!(table::contains(tokens, token_id), error::not_found(ENO_TOKEN_IN_TOKEN_STORE)); + let tokens = &mut TokenStore[token_owner].tokens; + assert!(tokens.contains(token_id), error::not_found(ENO_TOKEN_IN_TOKEN_STORE)); - let value = &mut table::borrow_mut(tokens, token_id).token_properties; + let value = &mut tokens.borrow_mut(token_id).token_properties; assert_non_standard_reserved_property(&keys); - property_map::update_property_map(value, keys, values, types); + value.update_property_map(keys, values, types); } /// Deposit the token balance into the recipients account and emit an event. fun direct_deposit(account_addr: address, token: Token) acquires TokenStore { assert!(token.amount > 0, error::invalid_argument(ETOKEN_CANNOT_HAVE_ZERO_AMOUNT)); - let token_store = borrow_global_mut(account_addr); + let token_store = &mut TokenStore[account_addr]; if (std::features::module_event_migration_enabled()) { - event::emit(Deposit { id: token.id, amount: token.amount }); + event::emit(TokenDeposit { account: account_addr, id: token.id, amount: token.amount }); + } else { + event::emit_event( + &mut token_store.deposit_events, + DepositEvent { id: token.id, amount: token.amount }, + ); }; - event::emit_event( - &mut token_store.deposit_events, - DepositEvent { id: token.id, amount: token.amount }, - ); assert!( exists(account_addr), error::not_found(ETOKEN_STORE_NOT_PUBLISHED), ); - if (!table::contains(&token_store.tokens, token.id)) { - table::add(&mut token_store.tokens, token.id, token); + if (!token_store.tokens.contains(token.id)) { + token_store.tokens.add(token.id, token); } else { - let recipient_token = table::borrow_mut(&mut token_store.tokens, token.id); + let recipient_token = token_store.tokens.borrow_mut(token.id); merge(recipient_token, token); }; } fun assert_collection_exists(creator_address: address, collection_name: String) acquires Collections { assert!(exists(creator_address), error::not_found(ECOLLECTIONS_NOT_PUBLISHED)); - let all_collection_data = &borrow_global(creator_address).collection_data; - assert!(table::contains(all_collection_data, collection_name), error::not_found(ECOLLECTION_NOT_PUBLISHED)); + let all_collection_data = &Collections[creator_address].collection_data; + assert!(all_collection_data.contains(collection_name), error::not_found(ECOLLECTION_NOT_PUBLISHED)); } fun assert_tokendata_exists(creator: &signer, token_data_id: TokenDataId) acquires Collections { let creator_addr = token_data_id.creator; assert!(signer::address_of(creator) == creator_addr, error::permission_denied(ENO_MUTATE_CAPABILITY)); assert!(exists(creator_addr), error::not_found(ECOLLECTIONS_NOT_PUBLISHED)); - let all_token_data = &mut borrow_global_mut(creator_addr).token_data; - assert!(table::contains(all_token_data, token_data_id), error::not_found(ETOKEN_DATA_NOT_PUBLISHED)); + let all_token_data = &Collections[creator_addr].token_data; + assert!(all_token_data.contains(token_data_id), error::not_found(ETOKEN_DATA_NOT_PUBLISHED)); } fun assert_non_standard_reserved_property(keys: &vector) { - 
vector::for_each_ref(keys, |key| { + keys.for_each_ref(|key| { let key: &String = key; - let length = string::length(key); + let length = key.length(); if (length >= 6) { - let prefix = string::sub_string(&*key, 0, 6); + let prefix = key.sub_string(0, 6); assert!(prefix != string::utf8(b"TOKEN_"), error::permission_denied(EPROPERTY_RESERVED_BY_STANDARD)); }; }); @@ -1947,9 +2021,9 @@ module aptos_token::token { mutate_setting ); - let default_keys = if (vector::length(&property_keys) == 0) { vector[string::utf8(b"attack"), string::utf8(b"num_of_use")] } else { property_keys }; - let default_vals = if (vector::length>(&property_values) == 0) { vector>[bcs::to_bytes(&10), bcs::to_bytes(&5)] } else { property_values }; - let default_types = if (vector::length(&property_types) == 0) { vector[string::utf8(b"u64"), string::utf8(b"u64")] } else { property_types }; + let default_keys = if (property_keys.length::() == 0) { vector[string::utf8(b"attack"), string::utf8(b"num_of_use")] } else { property_keys }; + let default_vals = if (property_values.length::>() == 0) { vector>[bcs::to_bytes(&10), bcs::to_bytes(&5)] } else { property_values }; + let default_types = if (property_types.length::() == 0) { vector[string::utf8(b"u64"), string::utf8(b"u64")] } else { property_types }; let mutate_setting = token_mutate_setting; create_token_script( creator, @@ -1984,8 +2058,11 @@ module aptos_token::token { vector[false, false, false], vector[false, false, false, false, false], ); - let collections = borrow_global(signer::address_of(&creator)); - assert!(event::counter(&collections.create_collection_events) == 1, 1); + assert!( + event::emitted_events().length() == 1 || event::emitted_events().length( + ) == 1, + 1 + ); } #[test(creator = @0xAF)] @@ -2017,7 +2094,7 @@ module aptos_token::token { assert!(balance_of(signer::address_of(creator), token_id) == 3, 1); } - #[test(creator = @0xAF, owner = @0xBB)] + #[test(creator = @0xAF)] fun test_mutate_token_property_upsert(creator: &signer) acquires Collections, TokenStore { use std::bcs; account::create_account_for_test(signer::address_of(creator)); @@ -2060,7 +2137,7 @@ module aptos_token::token { ); } - #[test(creator = @0xAF, owner = @0xBB)] + #[test(creator = @0xAF)] fun test_get_property_map_should_not_update_source_value(creator: &signer) acquires Collections, TokenStore { use std::bcs; account::create_account_for_test(signer::address_of(creator)); @@ -2089,7 +2166,7 @@ module aptos_token::token { string::utf8(b"u64"), string::utf8(b"u64") ]; let pm = get_property_map(signer::address_of(creator), token_id); - assert!(property_map::length(&pm) == 2, 1); + assert!(pm.length() == 2, 1); let new_token_id = mutate_one_token( creator, signer::address_of(creator), @@ -2099,16 +2176,12 @@ module aptos_token::token { new_types, ); let updated_pm = get_property_map(signer::address_of(creator), new_token_id); - assert!(property_map::length(&updated_pm) == 2, 1); - property_map::update_property_value( - &mut updated_pm, - &string::utf8(b"attack"), - property_map::create_property_value(&2), - ); + assert!(updated_pm.length() == 2, 1); + updated_pm.update_property_value(&string::utf8(b"attack"), property_map::create_property_value(&2)); - assert!(property_map::read_u64(&updated_pm, &string::utf8(b"attack")) == 2, 1); + assert!(updated_pm.read_u64(&string::utf8(b"attack")) == 2, 1); let og_pm = get_property_map(signer::address_of(creator), new_token_id); - assert!(property_map::read_u64(&og_pm, &string::utf8(b"attack")) == 1, 1); + 
assert!(og_pm.read_u64(&string::utf8(b"attack")) == 1, 1); } #[test(framework = @0x1, creator = @0xcafe)] @@ -2167,7 +2240,7 @@ module aptos_token::token { let pre_amount = &mut get_token_supply(creator_addr, token_id.token_data_id); burn_by_creator(creator, creator_addr, get_collection_name(), get_token_name(), 0, 1); let aft_amount = &mut get_token_supply(creator_addr, token_id.token_data_id); - assert!((option::extract(pre_amount) - option::extract(aft_amount)) == 1, 1); + assert!((pre_amount.extract::() - aft_amount.extract::()) == 1, 1); // create unlimited token and collection let new_addr = signer::address_of(another_creator); @@ -2335,9 +2408,9 @@ module aptos_token::token { assert!(balance_of(signer::address_of(owner), token_id) == 0, 1); // The corresponding token_data and collection_data should be deleted - let collections = borrow_global(signer::address_of(creator)); - assert!(!table::contains(&collections.collection_data, token_id.token_data_id.name), 1); - assert!(!table::contains(&collections.token_data, token_id.token_data_id), 1); + let collections = &Collections[signer::address_of(creator)]; + assert!(!collections.collection_data.contains(token_id.token_data_id.name), 1); + assert!(!collections.token_data.contains(token_id.token_data_id), 1); } #[test(creator = @0xcafe)] @@ -2411,7 +2484,7 @@ module aptos_token::token { assert!(get_collection_maximum(creator_address, collection_name) == 10, 1); } - #[test(creator = @0xcafe, owner = @0x456)] + #[test(creator = @0xcafe)] fun test_mutate_default_token_properties( creator: &signer, ) acquires Collections, TokenStore { @@ -2449,10 +2522,10 @@ module aptos_token::token { new_types, ); - let all_token_data = &borrow_global(signer::address_of(creator)).token_data; - assert!(table::contains(all_token_data, token_id.token_data_id), 1); - let props = &table::borrow(all_token_data, token_id.token_data_id).default_properties; - assert!(property_map::read_u64(props, &string::utf8(b"attack")) == 1, 1); + let all_token_data = &Collections[signer::address_of(creator)].token_data; + assert!(all_token_data.contains(token_id.token_data_id), 1); + let props = &all_token_data.borrow(token_id.token_data_id).default_properties; + assert!(props.read_u64(&string::utf8(b"attack")) == 1, 1); } #[test(creator = @0xcafe)] @@ -2614,10 +2687,10 @@ module aptos_token::token { assert!(balance_of(signer::address_of(creator), new_id_2) == 1, 1); assert!(balance_of(signer::address_of(creator), token_id) == 0, 1); - let creator_props = &borrow_global(signer::address_of(creator)).tokens; - let token = table::borrow(creator_props, new_id_1); + let creator_props = &TokenStore[signer::address_of(creator)].tokens; + let token = creator_props.borrow(new_id_1); - assert!(property_map::length(&token.token_properties) == 2, property_map::length(&token.token_properties)); + assert!(token.token_properties.length() == 2, token.token_properties.length()); // mutate token with property_version > 0 should not generate new property_version mutate_token_properties( creator, @@ -2635,10 +2708,10 @@ module aptos_token::token { // transfer token with property_version > 0 also transfer the token properties direct_transfer(creator, owner, new_id_1, 1); - let props = &borrow_global(signer::address_of(owner)).tokens; - assert!(table::contains(props, new_id_1), 1); - let token = table::borrow(props, new_id_1); - assert!(property_map::length(&token.token_properties) == 2, property_map::length(&token.token_properties)); + let props = &TokenStore[signer::address_of(owner)].tokens; + 
assert!(props.contains(new_id_1), 1); + let token = props.borrow(new_id_1); + assert!(token.token_properties.length() == 2, token.token_properties.length()); } #[test(creator = @0xcafe)] @@ -2726,9 +2799,9 @@ module aptos_token::token { ); let (token, capability) = partial_withdraw_with_capability(cap, 1); - assert!(option::borrow(&capability).amount == 2, 1); - let (token_1, cap) = partial_withdraw_with_capability(option::extract(&mut capability), 2); - assert!(option::is_none(&cap), 1); + assert!(capability.borrow::().amount == 2, 1); + let (token_1, cap) = partial_withdraw_with_capability(capability.extract(), 2); + assert!(cap.is_none(), 1); merge(&mut token, token_1); token } diff --git a/aptos-move/framework/aptos-token/sources/token.spec.move b/aptos-move/framework/aptos-token/sources/token.spec.move index 5226e51f27441..03d8ffd114a02 100644 --- a/aptos-move/framework/aptos-token/sources/token.spec.move +++ b/aptos-move/framework/aptos-token/sources/token.spec.move @@ -1,7 +1,7 @@ spec aptos_token::token { spec module { pragma verify = true; - pragma aborts_if_is_strict; + pragma aborts_if_is_partial; } /// The length of the name is up to MAX_COLLECTION_NAME_LENGTH; @@ -86,12 +86,12 @@ spec aptos_token::token { include CreateTokenDataIdAbortsIf{ creator: token_data_address, - collection: collection, - name: name + collection, + name }; include MintTokenAbortsIf { - token_data_id: token_data_id + token_data_id }; } @@ -113,7 +113,7 @@ spec aptos_token::token { let addr = signer::address_of(account); aborts_if addr != creator; include CreateTokenDataIdAbortsIf { - creator: creator, + creator, collection: collection_name, name: token_name }; @@ -132,8 +132,8 @@ spec aptos_token::token { pragma aborts_if_is_partial; include CreateTokenDataIdAbortsIf{ creator: creators_address, - collection: collection, - name: name + collection, + name }; } @@ -142,12 +142,12 @@ spec aptos_token::token { pragma aborts_if_is_partial; let addr = signer::address_of(account); let account_addr = global(addr); - aborts_if !exists(addr) && !exists(addr); - aborts_if !exists(addr) && account_addr.guid_creation_num + 4 >= account::MAX_GUID_CREATION_NUM; - aborts_if !exists(addr) && account_addr.guid_creation_num + 4 > MAX_U64; - aborts_if !exists(addr) && account_addr.guid_creation_num + 9 > account::MAX_GUID_CREATION_NUM; - aborts_if !exists(addr) && account_addr.guid_creation_num + 9 > MAX_U64; - aborts_if !exists(addr) && !exists(addr); + // aborts_if !exists(addr) && !exists(addr); + // aborts_if !exists(addr) && account_addr.guid_creation_num + 4 >= account::MAX_GUID_CREATION_NUM; + // aborts_if !exists(addr) && account_addr.guid_creation_num + 4 > MAX_U64; + // aborts_if !exists(addr) && account_addr.guid_creation_num + 9 > account::MAX_GUID_CREATION_NUM; + // aborts_if !exists(addr) && account_addr.guid_creation_num + 9 > MAX_U64; + // aborts_if !exists(addr) && !exists(addr); } spec transfer_with_opt_in( @@ -162,7 +162,7 @@ spec aptos_token::token { //TODO: Abort condition is complex because of transfer function. 
pragma aborts_if_is_partial; include CreateTokenDataIdAbortsIf{ - creator: creator, + creator, collection: collection_name, name: token_name }; @@ -243,12 +243,12 @@ spec aptos_token::token { let collection_data = table::spec_get(global(addr).collection_data, collection_name); include AssertCollectionExistsAbortsIf { creator_address: addr, - collection_name: collection_name + collection_name }; aborts_if !collection_data.mutability_config.description; - aborts_if !exists(addr) && !exists(addr); - aborts_if !exists(addr) && account.guid_creation_num + 9 >= account::MAX_GUID_CREATION_NUM; - aborts_if !exists(addr) && account.guid_creation_num + 9 > MAX_U64; + // aborts_if !exists(addr) && !exists(addr); + // aborts_if !exists(addr) && account.guid_creation_num + 9 >= account::MAX_GUID_CREATION_NUM; + // aborts_if !exists(addr) && account.guid_creation_num + 9 > MAX_U64; } /// The uri of Collection is mutable. @@ -259,12 +259,12 @@ spec aptos_token::token { aborts_if len(uri.bytes) > MAX_URI_LENGTH; include AssertCollectionExistsAbortsIf { creator_address: addr, - collection_name: collection_name + collection_name }; aborts_if !collection_data.mutability_config.uri; - aborts_if !exists(addr) && !exists(addr); - aborts_if !exists(addr) && account.guid_creation_num + 9 >= account::MAX_GUID_CREATION_NUM; - aborts_if !exists(addr) && account.guid_creation_num + 9 > MAX_U64; + // aborts_if !exists(addr) && !exists(addr); + // aborts_if !exists(addr) && account.guid_creation_num + 9 >= account::MAX_GUID_CREATION_NUM; + // aborts_if !exists(addr) && account.guid_creation_num + 9 > MAX_U64; } /// Cannot change maximum from 0 and cannot change maximum to 0. @@ -276,14 +276,14 @@ spec aptos_token::token { let collection_data = table::spec_get(global(addr).collection_data, collection_name); include AssertCollectionExistsAbortsIf { creator_address: addr, - collection_name: collection_name + collection_name }; aborts_if collection_data.maximum == 0 || maximum == 0; aborts_if maximum < collection_data.supply; aborts_if !collection_data.mutability_config.maximum; - aborts_if !exists(addr) && !exists(addr); - aborts_if !exists(addr) && account.guid_creation_num + 9 >= account::MAX_GUID_CREATION_NUM; - aborts_if !exists(addr) && account.guid_creation_num + 9 > MAX_U64; + // aborts_if !exists(addr) && !exists(addr); + // aborts_if !exists(addr) && account.guid_creation_num + 9 >= account::MAX_GUID_CREATION_NUM; + // aborts_if !exists(addr) && account.guid_creation_num + 9 > MAX_U64; } /// Cannot change maximum from 0 and cannot change maximum to 0. @@ -298,9 +298,9 @@ spec aptos_token::token { aborts_if token_data.maximum == 0 || maximum == 0; aborts_if maximum < token_data.supply; aborts_if !token_data.mutability_config.maximum; - aborts_if !exists(addr) && !exists(addr); - aborts_if !exists(addr) && account.guid_creation_num + 9 >= account::MAX_GUID_CREATION_NUM; - aborts_if !exists(addr) && account.guid_creation_num + 9 > MAX_U64; + // aborts_if !exists(addr) && !exists(addr); + // aborts_if !exists(addr) && account.guid_creation_num + 9 >= account::MAX_GUID_CREATION_NUM; + // aborts_if !exists(addr) && account.guid_creation_num + 9 > MAX_U64; } /// The length of uri should less than MAX_URI_LENGTH. 
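// For reference, a minimal self-contained sketch of the `aborts_if_is_partial` pragma that the
// specs above switch to. The module, function, and condition names below are illustrative only
// and are not part of this patch: under `aborts_if_is_strict` every abort of the function must
// be enumerated, while `aborts_if_is_partial` only requires that the listed conditions abort and
// tolerates aborts left unspecified (such as the commented-out guid_creation_num cases above).
module 0x42::partial_spec_example {
    /// Error code used by the example below.
    const EZERO_INPUT: u64 = 1;

    fun guarded_div(x: u64, y: u64): u64 {
        assert!(x > 0, EZERO_INPUT);
        x / y // also aborts on division by zero, which the spec below does not list
    }

    spec guarded_div {
        // Only the first abort case is listed; the division-by-zero abort is tolerated
        // because of `aborts_if_is_partial`. Under `aborts_if_is_strict` this spec would
        // be rejected until `aborts_if y == 0` were added as well.
        pragma aborts_if_is_partial;
        aborts_if x == 0;
    }
}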
@@ -318,9 +318,9 @@ spec aptos_token::token { include AssertTokendataExistsAbortsIf; aborts_if len(uri.bytes) > MAX_URI_LENGTH; aborts_if !token_data.mutability_config.uri; - aborts_if !exists(addr) && !exists(addr); - aborts_if !exists(addr) && account.guid_creation_num + 9 >= account::MAX_GUID_CREATION_NUM; - aborts_if !exists(addr) && account.guid_creation_num + 9 > MAX_U64; + // aborts_if !exists(addr) && !exists(addr); + // aborts_if !exists(addr) && account.guid_creation_num + 9 >= account::MAX_GUID_CREATION_NUM; + // aborts_if !exists(addr) && account.guid_creation_num + 9 > MAX_U64; } /// The token royalty is mutable @@ -331,9 +331,9 @@ spec aptos_token::token { let all_token_data = global(token_data_id.creator).token_data; let token_data = table::spec_get(all_token_data, token_data_id); aborts_if !token_data.mutability_config.royalty; - aborts_if !exists(addr) && !exists(addr); - aborts_if !exists(addr) && account.guid_creation_num + 9 >= account::MAX_GUID_CREATION_NUM; - aborts_if !exists(addr) && account.guid_creation_num + 9 > MAX_U64; + // aborts_if !exists(addr) && !exists(addr); + // aborts_if !exists(addr) && account.guid_creation_num + 9 >= account::MAX_GUID_CREATION_NUM; + // aborts_if !exists(addr) && account.guid_creation_num + 9 > MAX_U64; } /// The token description is mutable @@ -344,9 +344,9 @@ spec aptos_token::token { let all_token_data = global(token_data_id.creator).token_data; let token_data = table::spec_get(all_token_data, token_data_id); aborts_if !token_data.mutability_config.description; - aborts_if !exists(addr) && !exists(addr); - aborts_if !exists(addr) && account.guid_creation_num + 9 >= account::MAX_GUID_CREATION_NUM; - aborts_if !exists(addr) && account.guid_creation_num + 9 > MAX_U64; + // aborts_if !exists(addr) && !exists(addr); + // aborts_if !exists(addr) && account.guid_creation_num + 9 >= account::MAX_GUID_CREATION_NUM; + // aborts_if !exists(addr) && account.guid_creation_num + 9 > MAX_U64; } /// The property map is mutable @@ -401,7 +401,6 @@ spec aptos_token::token { royalty_points_denominator: u64; payee_address: address; aborts_if royalty_points_numerator > royalty_points_denominator; - aborts_if !exists(payee_address); } spec deposit_token(account: &signer, token: Token) { @@ -446,9 +445,9 @@ spec aptos_token::token { let addr = signer::address_of(account); let account_addr = global(addr); - aborts_if !exists(addr) && !exists(addr); - aborts_if !exists(addr) && account_addr.guid_creation_num + 4 >= account::MAX_GUID_CREATION_NUM; - aborts_if !exists(addr) && account_addr.guid_creation_num + 4 > MAX_U64; + // aborts_if !exists(addr) && !exists(addr); + // aborts_if !exists(addr) && account_addr.guid_creation_num + 4 >= account::MAX_GUID_CREATION_NUM; + // aborts_if !exists(addr) && account_addr.guid_creation_num + 4 > MAX_U64; } spec merge(dst_token: &mut Token, source_token: Token) { @@ -548,11 +547,11 @@ spec aptos_token::token { let collection_data = global(addr).collection_data; // TODO: The collection_data should not exist before you create it. 
// aborts_if table::spec_contains(collection_data, name); - aborts_if b && !exists(addr); - aborts_if len(name.bytes) > MAX_COLLECTION_NAME_LENGTH; - aborts_if len(uri.bytes) > MAX_URI_LENGTH; - aborts_if b && account.guid_creation_num + 3 >= account::MAX_GUID_CREATION_NUM; - aborts_if b && account.guid_creation_num + 3 > MAX_U64; + // aborts_if b && !exists(addr); + // aborts_if len(name.bytes) > MAX_COLLECTION_NAME_LENGTH; + // aborts_if len(uri.bytes) > MAX_URI_LENGTH; + // aborts_if b && account.guid_creation_num + 3 >= account::MAX_GUID_CREATION_NUM; + // aborts_if b && account.guid_creation_num + 3 > MAX_U64; include CreateCollectionMutabilityConfigAbortsIf; } @@ -565,7 +564,7 @@ spec aptos_token::token { spec check_tokendata_exists(creator: address, collection_name: String, token_name: String): bool { aborts_if !exists(creator); include CreateTokenDataIdAbortsIf { - creator: creator, + creator, collection: collection_name, name: token_name }; @@ -603,8 +602,8 @@ spec aptos_token::token { aborts_if !exists(account_addr); include CreateTokenDataIdAbortsIf { creator: account_addr, - collection: collection, - name: name + collection, + name }; aborts_if !table::spec_contains(collections.collection_data, collection); aborts_if table::spec_contains(collections.token_data, token_data_id); @@ -752,7 +751,7 @@ spec aptos_token::token { include DirectDepositAbortsIf { account_addr: receiver, - token_id: token_id, + token_id, token_amount: amount, }; } diff --git a/aptos-move/framework/aptos-token/sources/token_event_store.move b/aptos-move/framework/aptos-token/sources/token_event_store.move index 0171e9cb9f88f..f3ff1b20ca6fe 100644 --- a/aptos-move/framework/aptos-token/sources/token_event_store.move +++ b/aptos-move/framework/aptos-token/sources/token_event_store.move @@ -59,7 +59,7 @@ module aptos_token::token_event_store { #[event] /// Event emitted when the collection maximum is mutated - struct CollectionMaxiumMutate has drop, store { + struct CollectionMaximumMutate has drop, store { creator_addr: address, collection_name: String, old_maximum: u64, @@ -230,7 +230,7 @@ module aptos_token::token_event_store { } /// Emit the collection uri mutation event - public(friend) fun emit_collection_uri_mutate_event(creator: &signer, collection: String, old_uri: String, new_uri: String) acquires TokenEventStoreV1 { + friend fun emit_collection_uri_mutate_event(creator: &signer, collection: String, old_uri: String, new_uri: String) acquires TokenEventStoreV1 { let event = CollectionUriMutateEvent { creator_addr: signer::address_of(creator), collection_name: collection, @@ -238,7 +238,7 @@ module aptos_token::token_event_store { new_uri, }; initialize_token_event_store(creator); - let token_event_store = borrow_global_mut(signer::address_of(creator)); + let token_event_store = &mut TokenEventStoreV1[signer::address_of(creator)]; if (std::features::module_event_migration_enabled()) { event::emit( CollectionUriMutate { @@ -248,15 +248,16 @@ module aptos_token::token_event_store { new_uri, } ); + } else { + event::emit_event( + &mut token_event_store.collection_uri_mutate_events, + event, + ); }; - event::emit_event( - &mut token_event_store.collection_uri_mutate_events, - event, - ); } /// Emit the collection description mutation event - public(friend) fun emit_collection_description_mutate_event(creator: &signer, collection: String, old_description: String, new_description: String) acquires TokenEventStoreV1 { + friend fun emit_collection_description_mutate_event(creator: &signer, collection: String, 
old_description: String, new_description: String) acquires TokenEventStoreV1 { let event = CollectionDescriptionMutateEvent { creator_addr: signer::address_of(creator), collection_name: collection, @@ -264,7 +265,7 @@ module aptos_token::token_event_store { new_description, }; initialize_token_event_store(creator); - let token_event_store = borrow_global_mut(signer::address_of(creator)); + let token_event_store = &mut TokenEventStoreV1[signer::address_of(creator)]; if (std::features::module_event_migration_enabled()) { event::emit( CollectionDescriptionMutate { @@ -274,15 +275,16 @@ module aptos_token::token_event_store { new_description, } ); - }; - event::emit_event( - &mut token_event_store.collection_description_mutate_events, - event, - ); + } else { + event::emit_event( + &mut token_event_store.collection_description_mutate_events, + event, + ); + } } /// Emit the collection maximum mutation event - public(friend) fun emit_collection_maximum_mutate_event(creator: &signer, collection: String, old_maximum: u64, new_maximum: u64) acquires TokenEventStoreV1 { + friend fun emit_collection_maximum_mutate_event(creator: &signer, collection: String, old_maximum: u64, new_maximum: u64) acquires TokenEventStoreV1 { let event = CollectionMaxiumMutateEvent { creator_addr: signer::address_of(creator), collection_name: collection, @@ -290,45 +292,47 @@ module aptos_token::token_event_store { new_maximum, }; initialize_token_event_store(creator); - let token_event_store = borrow_global_mut(signer::address_of(creator)); + let token_event_store = &mut TokenEventStoreV1[signer::address_of(creator)]; if (std::features::module_event_migration_enabled()) { event::emit( - CollectionMaxiumMutate { + CollectionMaximumMutate { creator_addr: signer::address_of(creator), collection_name: collection, old_maximum, new_maximum, } ); + } else { + event::emit_event( + &mut token_event_store.collection_maximum_mutate_events, + event, + ); }; - event::emit_event( - &mut token_event_store.collection_maximum_mutate_events, - event, - ); } /// Emit the direct opt-in event - public(friend) fun emit_token_opt_in_event(account: &signer, opt_in: bool) acquires TokenEventStoreV1 { + friend fun emit_token_opt_in_event(account: &signer, opt_in: bool) acquires TokenEventStoreV1 { let opt_in_event = OptInTransferEvent { opt_in, }; initialize_token_event_store(account); - let token_event_store = borrow_global_mut(signer::address_of(account)); + let token_event_store = &mut TokenEventStoreV1[signer::address_of(account)]; if (std::features::module_event_migration_enabled()) { event::emit( OptInTransfer { account_address: signer::address_of(account), opt_in, }); - }; - event::emit_event( - &mut token_event_store.opt_in_events, - opt_in_event, - ); + } else { + event::emit_event( + &mut token_event_store.opt_in_events, + opt_in_event, + ); + } } /// Emit URI mutation event - public(friend) fun emit_token_uri_mutate_event( + friend fun emit_token_uri_mutate_event( creator: &signer, collection: String, token: String, @@ -346,7 +350,7 @@ module aptos_token::token_event_store { }; initialize_token_event_store(creator); - let token_event_store = borrow_global_mut(creator_addr); + let token_event_store = &mut TokenEventStoreV1[creator_addr]; if (std::features::module_event_migration_enabled()) { event::emit( UriMutation { @@ -356,15 +360,16 @@ module aptos_token::token_event_store { old_uri, new_uri, }); + } else { + event::emit_event( + &mut token_event_store.uri_mutate_events, + event, + ); }; - event::emit_event( - &mut 
token_event_store.uri_mutate_events, - event, - ); } /// Emit tokendata property map mutation event - public(friend) fun emit_default_property_mutate_event( + friend fun emit_default_property_mutate_event( creator: &signer, collection: String, token: String, @@ -384,7 +389,7 @@ module aptos_token::token_event_store { }; initialize_token_event_store(creator); - let token_event_store = borrow_global_mut(creator_addr); + let token_event_store = &mut TokenEventStoreV1[creator_addr]; if (std::features::module_event_migration_enabled()) { event::emit( DefaultPropertyMutate { @@ -395,15 +400,16 @@ module aptos_token::token_event_store { old_values, new_values, }); + } else { + event::emit_event( + &mut token_event_store.default_property_mutate_events, + event, + ); }; - event::emit_event( - &mut token_event_store.default_property_mutate_events, - event, - ); } /// Emit description mutation event - public(friend) fun emit_token_descrition_mutate_event( + friend fun emit_token_descrition_mutate_event( creator: &signer, collection: String, token: String, @@ -421,7 +427,7 @@ module aptos_token::token_event_store { }; initialize_token_event_store(creator); - let token_event_store = borrow_global_mut(creator_addr); + let token_event_store = &mut TokenEventStoreV1[creator_addr]; if (std::features::module_event_migration_enabled()) { event::emit( DescriptionMutate { @@ -431,15 +437,16 @@ module aptos_token::token_event_store { old_description, new_description, }); + } else { + event::emit_event( + &mut token_event_store.description_mutate_events, + event, + ); }; - event::emit_event( - &mut token_event_store.description_mutate_events, - event, - ); } /// Emit royalty mutation event - public(friend) fun emit_token_royalty_mutate_event( + friend fun emit_token_royalty_mutate_event( creator: &signer, collection: String, token: String, @@ -464,7 +471,7 @@ module aptos_token::token_event_store { }; initialize_token_event_store(creator); - let token_event_store = borrow_global_mut(creator_addr); + let token_event_store = &mut TokenEventStoreV1[creator_addr]; if (std::features::module_event_migration_enabled()) { event::emit( RoyaltyMutate { @@ -478,15 +485,16 @@ module aptos_token::token_event_store { new_royalty_denominator, new_royalty_payee_addr, }); + } else { + event::emit_event( + &mut token_event_store.royalty_mutate_events, + event, + ); }; - event::emit_event( - &mut token_event_store.royalty_mutate_events, - event, - ); } /// Emit maximum mutation event - public(friend) fun emit_token_maximum_mutate_event( + friend fun emit_token_maximum_mutate_event( creator: &signer, collection: String, token: String, @@ -504,7 +512,7 @@ module aptos_token::token_event_store { }; initialize_token_event_store(creator); - let token_event_store = borrow_global_mut(creator_addr); + let token_event_store = &mut TokenEventStoreV1[creator_addr]; if (std::features::module_event_migration_enabled()) { event::emit( MaximumMutate { @@ -514,10 +522,20 @@ module aptos_token::token_event_store { old_maximum, new_maximum, }); + } else { + event::emit_event( + &mut token_event_store.maximum_mutate_events, + event, + ); }; - event::emit_event( - &mut token_event_store.maximum_mutate_events, - event, - ); + } + + #[deprecated] + #[event] + struct CollectionMaxiumMutate has drop, store { + creator_addr: address, + collection_name: String, + old_maximum: u64, + new_maximum: u64, } } diff --git a/aptos-move/framework/aptos-token/sources/token_event_store.spec.move b/aptos-move/framework/aptos-token/sources/token_event_store.spec.move 
index f05fe20a6a759..33afa002c447c 100644 --- a/aptos-move/framework/aptos-token/sources/token_event_store.spec.move +++ b/aptos-move/framework/aptos-token/sources/token_event_store.spec.move @@ -1,11 +1,10 @@ spec aptos_token::token_event_store { spec module { - pragma verify = true; + pragma verify = false; pragma aborts_if_is_strict; } spec initialize_token_event_store(acct: &signer) { - pragma verify = true; let addr = signer::address_of(acct); include InitializeTokenEventStoreAbortsIf {creator : acct}; } diff --git a/aptos-move/framework/aptos-token/sources/token_transfers.move b/aptos-move/framework/aptos-token/sources/token_transfers.move index 6a8ec33b5ae2b..d98c9433626b7 100644 --- a/aptos-move/framework/aptos-token/sources/token_transfers.move +++ b/aptos-move/framework/aptos-token/sources/token_transfers.move @@ -36,7 +36,8 @@ module aptos_token::token_transfers { } #[event] - struct TokenOfferEvent has drop, store { + struct Offer has drop, store { + account: address, to_address: address, token_id: TokenId, amount: u64, @@ -50,7 +51,8 @@ module aptos_token::token_transfers { } #[event] - struct TokenCancelOffer has drop, store { + struct CancelOffer has drop, store { + account: address, to_address: address, token_id: TokenId, amount: u64, @@ -64,7 +66,8 @@ module aptos_token::token_transfers { } #[event] - struct TokenClaim has drop, store { + struct Claim has drop, store { + account: address, to_address: address, token_id: TokenId, amount: u64, @@ -114,33 +117,35 @@ module aptos_token::token_transfers { }; let pending_claims = - &mut borrow_global_mut(sender_addr).pending_claims; + &mut PendingClaims[sender_addr].pending_claims; let token_offer_id = create_token_offer_id(receiver, token_id); let token = token::withdraw_token(sender, token_id, amount); - if (!table::contains(pending_claims, token_offer_id)) { - table::add(pending_claims, token_offer_id, token); + if (!pending_claims.contains(token_offer_id)) { + pending_claims.add(token_offer_id, token); } else { - let dst_token = table::borrow_mut(pending_claims, token_offer_id); + let dst_token = pending_claims.borrow_mut(token_offer_id); token::merge(dst_token, token); }; if (std::features::module_event_migration_enabled()) { event::emit( - TokenOffer { + Offer { + account: sender_addr, to_address: receiver, token_id, amount, } ) - }; - event::emit_event( - &mut borrow_global_mut(sender_addr).offer_events, - TokenOfferEvent { - to_address: receiver, - token_id, - amount, - }, - ); + } else { + event::emit_event( + &mut PendingClaims[sender_addr].offer_events, + TokenOfferEvent { + to_address: receiver, + token_id, + amount, + }, + ); + } } public entry fun claim_script( @@ -162,30 +167,32 @@ module aptos_token::token_transfers { ) acquires PendingClaims { assert!(exists(sender), ETOKEN_OFFER_NOT_EXIST); let pending_claims = - &mut borrow_global_mut(sender).pending_claims; + &mut PendingClaims[sender].pending_claims; let token_offer_id = create_token_offer_id(signer::address_of(receiver), token_id); - assert!(table::contains(pending_claims, token_offer_id), error::not_found(ETOKEN_OFFER_NOT_EXIST)); - let tokens = table::remove(pending_claims, token_offer_id); + assert!(pending_claims.contains(token_offer_id), error::not_found(ETOKEN_OFFER_NOT_EXIST)); + let tokens = pending_claims.remove(token_offer_id); let amount = token::get_token_amount(&tokens); token::deposit_token(receiver, tokens); if (std::features::module_event_migration_enabled()) { event::emit( - TokenClaim { + Claim { + account: sender, to_address: 
signer::address_of(receiver), token_id, amount, } ) + } else { + event::emit_event( + &mut PendingClaims[sender].claim_events, + TokenClaimEvent { + to_address: signer::address_of(receiver), + token_id, + amount, + }, + ); }; - event::emit_event( - &mut borrow_global_mut(sender).claim_events, - TokenClaimEvent { - to_address: signer::address_of(receiver), - token_id, - amount, - }, - ); } public entry fun cancel_offer_script( @@ -210,28 +217,30 @@ module aptos_token::token_transfers { let token_offer_id = create_token_offer_id(receiver, token_id); assert!(exists(sender_addr), ETOKEN_OFFER_NOT_EXIST); let pending_claims = - &mut borrow_global_mut(sender_addr).pending_claims; - let token = table::remove(pending_claims, token_offer_id); + &mut PendingClaims[sender_addr].pending_claims; + let token = pending_claims.remove(token_offer_id); let amount = token::get_token_amount(&token); token::deposit_token(sender, token); if (std::features::module_event_migration_enabled()) { event::emit( - TokenCancelOffer { + CancelOffer { + account: sender_addr, to_address: receiver, token_id, amount, }, ) - }; - event::emit_event( - &mut borrow_global_mut(sender_addr).cancel_offer_events, - TokenCancelOfferEvent { - to_address: receiver, - token_id, - amount, - }, - ); + } else { + event::emit_event( + &mut PendingClaims[sender_addr].cancel_offer_events, + TokenCancelOfferEvent { + to_address: receiver, + token_id, + amount, + }, + ); + } } #[test(creator = @0x1, owner = @0x2)] @@ -324,4 +333,29 @@ module aptos_token::token_transfers { 0 ) } + + #[deprecated] + #[event] + struct TokenOfferEvent has drop, store { + to_address: address, + token_id: TokenId, + amount: u64, + } + + #[deprecated] + #[event] + struct TokenCancelOffer has drop, store { + to_address: address, + token_id: TokenId, + amount: u64, + } + + #[deprecated] + #[event] + struct TokenClaim has drop, store { + to_address: address, + token_id: TokenId, + amount: u64, + } + } diff --git a/aptos-move/framework/aptos-token/sources/token_transfers.spec.move b/aptos-move/framework/aptos-token/sources/token_transfers.spec.move index b87fda41a7a82..7e4bb278e124a 100644 --- a/aptos-move/framework/aptos-token/sources/token_transfers.spec.move +++ b/aptos-move/framework/aptos-token/sources/token_transfers.spec.move @@ -1,6 +1,6 @@ spec aptos_token::token_transfers { spec module { - pragma verify = true; + pragma verify = false; pragma aborts_if_is_strict; } diff --git a/aptos-move/framework/cached-packages/build.rs b/aptos-move/framework/cached-packages/build.rs index 581e8c7b32f46..7d85ac5810f0f 100644 --- a/aptos-move/framework/cached-packages/build.rs +++ b/aptos-move/framework/cached-packages/build.rs @@ -13,6 +13,20 @@ fn main() -> Result<()> { // Get the previous directory let mut prev_dir = current_dir; prev_dir.pop(); + println!( + "cargo:rerun-if-changed={}", + prev_dir + .join("aptos-experimental") + .join("sources") + .display() + ); + println!( + "cargo:rerun-if-changed={}", + prev_dir + .join("aptos-experimental") + .join("Move.toml") + .display() + ); println!( "cargo:rerun-if-changed={}", prev_dir diff --git a/aptos-move/framework/cached-packages/src/aptos_framework_sdk_builder.rs b/aptos-move/framework/cached-packages/src/aptos_framework_sdk_builder.rs index 4fc3a0b4fa1c4..b81c2e4880306 100644 --- a/aptos-move/framework/cached-packages/src/aptos_framework_sdk_builder.rs +++ b/aptos-move/framework/cached-packages/src/aptos_framework_sdk_builder.rs @@ -109,7 +109,7 @@ pub enum EntryFunctionCall { /// Here is an example attack if we don't 
ask for the second signature `cap_update_table`: /// Alice has rotated her account `addr_a` to `new_addr_a`. As a result, the following entry is created, to help Alice when recovering her wallet: /// `OriginatingAddress[new_addr_a]` -> `addr_a` - /// Alice has had bad day: her laptop blew up and she needs to reset her account on a new one. + /// Alice has had a bad day: her laptop blew up and she needs to reset her account on a new one. /// (Fortunately, she still has her secret key `new_sk_a` associated with her new address `new_addr_a`, so she can do this.) /// /// But Bob likes to mess with Alice. @@ -136,10 +136,22 @@ pub enum EntryFunctionCall { /// does not come with a proof-of-knowledge of the underlying SK. Nonetheless, we need this functionality due to /// the introduction of non-standard key algorithms, such as passkeys, which cannot produce proofs-of-knowledge in /// the format expected in `rotate_authentication_key`. + /// + /// If you'd like to followup with updating the `OriginatingAddress` table, you can call + /// `set_originating_address()`. AccountRotateAuthenticationKeyCall { new_auth_key: Vec, }, + /// Private entry function for key rotation that allows the signer to update their authentication key from a given public key. + /// This function will abort if the scheme is not recognized or if new_public_key_bytes is not a valid public key for the given scheme. + /// + /// Note: This function does not update the `OriginatingAddress` table. + AccountRotateAuthenticationKeyFromPublicKey { + scheme: u8, + new_public_key_bytes: Vec, + }, + AccountRotateAuthenticationKeyWithRotationCapability { rotation_cap_offerer_address: AccountAddress, new_scheme: u8, @@ -147,6 +159,102 @@ pub enum EntryFunctionCall { cap_update_table: Vec, }, + /// For the given account, add an entry to `OriginatingAddress` table mapping the account's + /// authentication key to the account's address. + /// + /// Can be used as a followup to `rotate_authentication_key_call()` to reconcile the + /// `OriginatingAddress` table, or to establish a mapping for a new account that has not yet had + /// its authentication key rotated. + /// + /// Aborts if there is already an entry in the `OriginatingAddress` table for the account's + /// authentication key. + /// + /// Kept as a private entry function to ensure that after an unproven rotation via + /// `rotate_authentication_key_call()`, the `OriginatingAddress` table is only updated under the + /// authority of the new authentication key. + AccountSetOriginatingAddress {}, + + /// Upserts an ED25519 backup key to an account that has a keyless public key as its original public key by converting the account's authentication key + /// to a multi-key of the original keyless public key and the new backup key that requires 1 signature from either key to authenticate. + /// This function takes a the account's original keyless public key and a ED25519 backup public key and rotates the account's authentication key to a multi-key of + /// the original keyless public key and the new backup key that requires 1 signature from either key to authenticate. + /// + /// Note: This function emits a `KeyRotationToMultiPublicKey` event marking both keys as verified since the keyless public key + /// is the original public key of the account and the new backup key has been validated via verifying the challenge signed by the new backup key. 
+ /// + /// # Arguments + /// * `account` - The signer representing the keyless account + /// * `keyless_public_key` - The original keyless public key of the account (wrapped in an AnyPublicKey) + /// * `backup_public_key` - The ED25519 public key to add as a backup + /// * `backup_key_proof` - A signature from the backup key proving ownership + /// + /// # Aborts + /// * If the any of inputs deserialize incorrectly + /// * If the provided public key is not a keyless public key + /// * If the keyless public key is not the original public key of the account + /// * If the backup key proof signature is invalid + /// + /// # Events + /// * Emits a `KeyRotationToMultiPublicKey` event with the new multi-key configuration + AccountUpsertEd25519BackupKeyOnKeylessAccount { + keyless_public_key: Vec, + backup_public_key: Vec, + backup_key_proof: Vec, + }, + + /// Add dispatchable authentication function that enables account abstraction via this function. + /// Note: it is a private entry function that can only be called directly from transaction. + AccountAbstractionAddAuthenticationFunction { + module_address: AccountAddress, + module_name: Vec, + function_name: Vec, + }, + + AccountAbstractionAddDispatchableAuthenticationFunction { + _module_address: AccountAddress, + _module_name: Vec, + _function_name: Vec, + }, + + AccountAbstractionInitialize {}, + + /// Add dispatchable derivable authentication function, that enables account abstraction via this function. + /// This means all accounts within the domain can use it to authenticate, without needing an initialization (unlike non-domain AA). + /// dispatchable function needs to verify two things: + /// - that signing_data.derivable_abstract_signature() is a valid signature of signing_data.digest() (just like regular AA) + /// - that signing_data.derivable_abstract_public_key() is correct identity representing the authenticator + /// (missing this step would allow impersonation) + /// + /// Note: This is public entry function, as it requires framework signer, and that can + /// only be obtained as a part of the governance script. + AccountAbstractionRegisterDerivableAuthenticationFunction { + module_address: AccountAddress, + module_name: Vec, + function_name: Vec, + }, + + /// Remove dispatchable authentication function that enables account abstraction via this function. + /// dispatchable function needs to verify that signing_data.authenticator() is a valid signature of signing_data.digest(). + /// Note: it is a private entry function that can only be called directly from transaction. + AccountAbstractionRemoveAuthenticationFunction { + module_address: AccountAddress, + module_name: Vec, + function_name: Vec, + }, + + /// Remove dispatchable authenticator so that all dispatchable authentication functions will be removed as well. + /// After calling this function, the account is not abstracted at all. + /// Note: it is a private entry function that can only be called directly from transaction. + AccountAbstractionRemoveAuthenticator {}, + + AccountAbstractionRemoveDispatchableAuthenticationFunction { + _module_address: AccountAddress, + _module_name: Vec, + _function_name: Vec, + }, + + AccountAbstractionRemoveDispatchableAuthenticator {}, + /// Batch version of APT transfer. AptosAccountBatchTransfer { recipients: Vec, @@ -165,6 +273,17 @@ pub enum EntryFunctionCall { auth_key: AccountAddress, }, + /// APT Primary Fungible Store specific specialized functions, + /// Utilized internally once migration of APT to FungibleAsset is complete. 
+ /// Convenient function to transfer APT to a recipient account that might not exist. + /// This would create the recipient APT PFS first, which also registers it to receive APT, before transferring. + /// TODO: once migration is complete, rename to just "transfer_only" and make it an entry function (for cheapest way + /// to transfer APT) - if we want to allow APT PFS without account itself + AptosAccountFungibleTransferOnly { + to: AccountAddress, + amount: u64, + }, + /// Set whether `account` can receive direct transfers of coins that they have not explicitly registered to receive. AptosAccountSetAllowDirectCoinTransfers { allow: bool, @@ -280,53 +399,6 @@ pub enum EntryFunctionCall { should_pass: bool, }, - /// Aborts a bridge transfer if the time lock has expired. - /// - /// @param caller The signer representing the bridge operator. - /// @param bridge_transfer_id The unique identifier for the bridge transfer. - /// @abort If the caller is not the bridge operator or if the time lock has not expired. - AtomicBridgeCounterpartyAbortBridgeTransfer { - bridge_transfer_id: Vec, - }, - - /// Bridge operator can complete the transfer - AtomicBridgeInitiatorCompleteBridgeTransfer { - bridge_transfer_id: Vec, - pre_image: Vec, - }, - - /// Initiate a bridge transfer of ETH from Movement to the base layer - /// Anyone can initiate a bridge transfer from the source chain - /// The amount is burnt from the initiator - AtomicBridgeInitiatorInitiateBridgeTransfer { - recipient: Vec, - hash_lock: Vec, - amount: u64, - }, - - /// Locks assets for a bridge transfer by the initiator. - /// - /// @param caller The signer representing the bridge operator. - /// @param initiator The initiator's Ethereum address as a vector of bytes. - /// @param bridge_transfer_id The unique identifier for the bridge transfer. - /// @param hash_lock The hash lock for securing the transfer. - /// @param time_lock The time lock duration for the transfer. - /// @param recipient The address of the recipient on the Aptos blockchain. - /// @param amount The amount of assets to be locked. - /// @abort If the caller is not the bridge operator. - AtomicBridgeCounterpartyLockBridgeTransferAssets { - initiator: Vec, - bridge_transfer_id: Vec, - hash_lock: Vec, - recipient: AccountAddress, - amount: u64, - }, - - /// Anyone can refund the transfer on the source chain once time lock has passed - AtomicBridgeInitiatorRefundBridgeTransfer { - bridge_transfer_id: Vec, - }, - /// Same as `publish_package` but as an entry function which can be called as a transaction. Because /// of current restrictions for txn parameters, the metadata needs to be passed in serialized form. CodePublishPackageTxn { @@ -341,6 +413,12 @@ pub enum EntryFunctionCall { coin_type: TypeTag, }, + /// Migrate to fungible store for `CoinType` if not yet. + CoinMigrateCoinStoreToFungibleStore { + coin_type: TypeTag, + accounts: Vec, + }, + /// Voluntarily migrate to fungible store for `CoinType` if not yet. CoinMigrateToFungibleStore { coin_type: TypeTag, @@ -433,9 +511,9 @@ pub enum EntryFunctionCall { new_beneficiary: AccountAddress, }, - /// Allows an owner to change the delegated voter of the underlying stake pool. + /// Deprecated. Use the partial governance voting flow instead. DelegationPoolSetDelegatedVoter { - new_voter: AccountAddress, + _new_voter: AccountAddress, }, /// Allows an owner to change the operator of the underlying stake pool. 
@@ -479,12 +557,78 @@ pub enum EntryFunctionCall { amount: u64, }, + /// This can be called to install or update a set of JWKs for a federated OIDC provider. This function should + /// be invoked to intially install a set of JWKs or to update a set of JWKs when a keypair is rotated. + /// + /// The `iss` parameter is the value of the `iss` claim on the JWTs that are to be verified by the JWK set. + /// `kid_vec`, `alg_vec`, `e_vec`, `n_vec` are String vectors of the JWK attributes `kid`, `alg`, `e` and `n` respectively. + /// See https://datatracker.ietf.org/doc/html/rfc7517#section-4 for more details about the JWK attributes aforementioned. + /// + /// For the example JWK set snapshot below containing 2 keys for Google found at https://www.googleapis.com/oauth2/v3/certs - + /// ```json + /// { + /// "keys": [ + /// { + /// "alg": "RS256", + /// "use": "sig", + /// "kty": "RSA", + /// "n": "wNHgGSG5B5xOEQNFPW2p_6ZxZbfPoAU5VceBUuNwQWLop0ohW0vpoZLU1tAsq_S9s5iwy27rJw4EZAOGBR9oTRq1Y6Li5pDVJfmzyRNtmWCWndR-bPqhs_dkJU7MbGwcvfLsN9FSHESFrS9sfGtUX-lZfLoGux23TKdYV9EE-H-NDASxrVFUk2GWc3rL6UEMWrMnOqV9-tghybDU3fcRdNTDuXUr9qDYmhmNegYjYu4REGjqeSyIG1tuQxYpOBH-tohtcfGY-oRTS09kgsSS9Q5BRM4qqCkGP28WhlSf4ui0-norS0gKMMI1P_ZAGEsLn9p2TlYMpewvIuhjJs1thw", + /// "kid": "d7b939771a7800c413f90051012d975981916d71", + /// "e": "AQAB" + /// }, + /// { + /// "kty": "RSA", + /// "kid": "b2620d5e7f132b52afe8875cdf3776c064249d04", + /// "alg": "RS256", + /// "n": "pi22xDdK2fz5gclIbDIGghLDYiRO56eW2GUcboeVlhbAuhuT5mlEYIevkxdPOg5n6qICePZiQSxkwcYMIZyLkZhSJ2d2M6Szx2gDtnAmee6o_tWdroKu0DjqwG8pZU693oLaIjLku3IK20lTs6-2TeH-pUYMjEqiFMhn-hb7wnvH_FuPTjgz9i0rEdw_Hf3Wk6CMypaUHi31y6twrMWq1jEbdQNl50EwH-RQmQ9bs3Wm9V9t-2-_Jzg3AT0Ny4zEDU7WXgN2DevM8_FVje4IgztNy29XUkeUctHsr-431_Iu23JIy6U4Kxn36X3RlVUKEkOMpkDD3kd81JPW4Ger_w", + /// "e": "AQAB", + /// "use": "sig" + /// } + /// ] + /// } + /// ``` + /// + /// We can call update_federated_jwk_set for Google's `iss` - "https://accounts.google.com" and for each vector + /// argument `kid_vec`, `alg_vec`, `e_vec`, `n_vec`, we set in index 0 the corresponding attribute in the first JWK and we set in index 1 + /// the corresponding attribute in the second JWK as shown below. + /// + /// ```move + /// use std::string::utf8; + /// aptos_framework::jwks::update_federated_jwk_set( + /// jwk_owner, + /// b"https://accounts.google.com", + /// vector[utf8(b"d7b939771a7800c413f90051012d975981916d71"), utf8(b"b2620d5e7f132b52afe8875cdf3776c064249d04")], + /// vector[utf8(b"RS256"), utf8(b"RS256")], + /// vector[utf8(b"AQAB"), utf8(b"AQAB")], + /// vector[ + /// utf8(b"wNHgGSG5B5xOEQNFPW2p_6ZxZbfPoAU5VceBUuNwQWLop0ohW0vpoZLU1tAsq_S9s5iwy27rJw4EZAOGBR9oTRq1Y6Li5pDVJfmzyRNtmWCWndR-bPqhs_dkJU7MbGwcvfLsN9FSHESFrS9sfGtUX-lZfLoGux23TKdYV9EE-H-NDASxrVFUk2GWc3rL6UEMWrMnOqV9-tghybDU3fcRdNTDuXUr9qDYmhmNegYjYu4REGjqeSyIG1tuQxYpOBH-tohtcfGY-oRTS09kgsSS9Q5BRM4qqCkGP28WhlSf4ui0-norS0gKMMI1P_ZAGEsLn9p2TlYMpewvIuhjJs1thw"), + /// utf8(b"pi22xDdK2fz5gclIbDIGghLDYiRO56eW2GUcboeVlhbAuhuT5mlEYIevkxdPOg5n6qICePZiQSxkwcYMIZyLkZhSJ2d2M6Szx2gDtnAmee6o_tWdroKu0DjqwG8pZU693oLaIjLku3IK20lTs6-2TeH-pUYMjEqiFMhn-hb7wnvH_FuPTjgz9i0rEdw_Hf3Wk6CMypaUHi31y6twrMWq1jEbdQNl50EwH-RQmQ9bs3Wm9V9t-2-_Jzg3AT0Ny4zEDU7WXgN2DevM8_FVje4IgztNy29XUkeUctHsr-431_Iu23JIy6U4Kxn36X3RlVUKEkOMpkDD3kd81JPW4Ger_w") + /// ] + /// ) + /// ``` + /// + /// See AIP-96 for more details about federated keyless - https://github.com/aptos-foundation/AIPs/blob/main/aips/aip-96.md + /// + /// NOTE: Currently only RSA keys are supported. 
+ JwksUpdateFederatedJwkSet { + iss: Vec, + kid_vec: Vec>, + alg_vec: Vec>, + e_vec: Vec>, + n_vec: Vec>, + }, + /// Withdraw an `amount` of coin `CoinType` from `account` and burn it. ManagedCoinBurn { coin_type: TypeTag, amount: u64, }, + /// Destroys capabilities from the account, so that the user no longer has access to mint or burn. + ManagedCoinDestroyCaps { + coin_type: TypeTag, + }, + /// Initialize new coin `CoinType` in Aptos Blockchain. /// Mint and Burn Capabilities will be stored under `account` in `Capabilities` resource. ManagedCoinInitialize { @@ -592,6 +736,33 @@ pub enum EntryFunctionCall { metadata_values: Vec>, }, + /// Private entry function that creates a new multisig account on top of an existing account and immediately rotate + /// the origin auth key to 0x0. + /// + /// Note: If the original account is a resource account, this does not revoke all control over it as if any + /// SignerCapability of the resource account still exists, it can still be used to generate the signer for the + /// account. + MultisigAccountCreateWithExistingAccountAndRevokeAuthKeyCall { + owners: Vec, + num_signatures_required: u64, + metadata_keys: Vec>, + metadata_values: Vec>, + }, + + /// Private entry function that creates a new multisig account on top of an existing account. + /// + /// This offers a migration path for an existing account with any type of auth key. + /// + /// Note that this does not revoke auth key-based control over the account. Owners should separately rotate the auth + /// key after they are fully migrated to the new multisig account. Alternatively, they can call + /// create_with_existing_account_and_revoke_auth_key_call instead. + MultisigAccountCreateWithExistingAccountCall { + owners: Vec, + num_signatures_required: u64, + metadata_keys: Vec>, + metadata_values: Vec>, + }, + /// Creates a new multisig account with the specified additional owner list and signatures required. /// /// @param additional_owners The owner account who calls this function cannot be in the additional_owners and there @@ -714,60 +885,11 @@ pub enum EntryFunctionCall { approved: bool, }, - /// Completes a bridge transfer on the destination chain. - - /// @param caller The signer representing the bridge relayer. - /// @param initiator The initiator's Ethereum address as a vector of bytes. - /// @param bridge_transfer_id The unique identifier for the bridge transfer. - /// @param recipient The address of the recipient on the Aptos blockchain. - /// @param amount The amount of assets to be locked. - /// @param nonce The unique nonce for the transfer. - /// @abort If the caller is not the bridge relayer or the transfer has already been processed. - NativeBridgeCompleteBridgeTransfer { - bridge_transfer_id: Vec, - initiator: Vec, - recipient: AccountAddress, - amount: u64, - nonce: u64, - }, - - /// Initiate a bridge transfer of MOVE from Movement to Ethereum - /// Anyone can initiate a bridge transfer from the source chain - /// The amount is burnt from the initiator and the module-level nonce is incremented - /// @param initiator The initiator's Ethereum address as a vector of bytes. - /// @param recipient The address of the recipient on the Aptos blockchain. - /// @param amount The amount of assets to be locked. - NativeBridgeInitiateBridgeTransfer { - recipient: Vec, - amount: u64, - }, - - /// Updates the bridge fee, requiring relayer validation. - /// - /// @param relayer The signer representing the Relayer. - /// @param new_bridge_fee The new bridge fee to be set. 
- /// @abort If the new bridge fee is the same as the old bridge fee. - NativeBridgeUpdateBridgeFee { - new_bridge_fee: u64, + NonceValidationAddNonceBuckets { + count: u64, }, - /// Updates the insurance budget divider, requiring governance validation. - /// - /// @param aptos_framework The signer representing the Aptos framework. - /// @param new_insurance_budget_divider The new insurance budget divider to be set. - /// @abort If the new insurance budget divider is the same as the old insurance budget divider. - NativeBridgeUpdateInsuranceBudgetDivider { - new_insurance_budget_divider: u64, - }, - - /// Updates the insurance fund, requiring governance validation. - /// - /// @param aptos_framework The signer representing the Aptos framework. - /// @param new_insurance_fund The new insurance fund to be set. - /// @abort If the new insurance fund is the same as the old insurance fund. - NativeBridgeUpdateInsuranceFund { - new_insurance_fund: AccountAddress, - }, + NonceValidationInitializeNonceTable {}, /// Entry function that can be used to transfer, if allow_ungated_transfer is set true. ObjectTransferCall { @@ -784,6 +906,15 @@ pub enum EntryFunctionCall { code: Vec>, }, + /// Revoke all storable permission handle of the signer immediately. + PermissionedSignerRevokeAllHandles {}, + + /// Revoke a specific storable permission handle immediately. This will disallow owner of + /// the storable permission handle to derive signer from it anymore. + PermissionedSignerRevokePermissionStorageAddress { + permissions_storage_addr: AccountAddress, + }, + /// Creates a new resource account and rotates the authentication key to either /// the optional auth key if it is non-empty (though auth keys are 32-bytes) /// or the source accounts current auth key. @@ -1178,6 +1309,10 @@ impl EntryFunctionCall { AccountRotateAuthenticationKeyCall { new_auth_key } => { account_rotate_authentication_key_call(new_auth_key) }, + AccountRotateAuthenticationKeyFromPublicKey { + scheme, + new_public_key_bytes, + } => account_rotate_authentication_key_from_public_key(scheme, new_public_key_bytes), AccountRotateAuthenticationKeyWithRotationCapability { rotation_cap_offerer_address, new_scheme, @@ -1189,6 +1324,66 @@ impl EntryFunctionCall { new_public_key_bytes, cap_update_table, ), + AccountSetOriginatingAddress {} => account_set_originating_address(), + AccountUpsertEd25519BackupKeyOnKeylessAccount { + keyless_public_key, + backup_public_key, + backup_key_proof, + } => account_upsert_ed25519_backup_key_on_keyless_account( + keyless_public_key, + backup_public_key, + backup_key_proof, + ), + AccountAbstractionAddAuthenticationFunction { + module_address, + module_name, + function_name, + } => account_abstraction_add_authentication_function( + module_address, + module_name, + function_name, + ), + AccountAbstractionAddDispatchableAuthenticationFunction { + _module_address, + _module_name, + _function_name, + } => account_abstraction_add_dispatchable_authentication_function( + _module_address, + _module_name, + _function_name, + ), + AccountAbstractionInitialize {} => account_abstraction_initialize(), + AccountAbstractionRegisterDerivableAuthenticationFunction { + module_address, + module_name, + function_name, + } => account_abstraction_register_derivable_authentication_function( + module_address, + module_name, + function_name, + ), + AccountAbstractionRemoveAuthenticationFunction { + module_address, + module_name, + function_name, + } => account_abstraction_remove_authentication_function( + module_address, + 
module_name, + function_name, + ), + AccountAbstractionRemoveAuthenticator {} => account_abstraction_remove_authenticator(), + AccountAbstractionRemoveDispatchableAuthenticationFunction { + _module_address, + _module_name, + _function_name, + } => account_abstraction_remove_dispatchable_authentication_function( + _module_address, + _module_name, + _function_name, + ), + AccountAbstractionRemoveDispatchableAuthenticator {} => { + account_abstraction_remove_dispatchable_authenticator() + }, AptosAccountBatchTransfer { recipients, amounts, @@ -1199,6 +1394,9 @@ impl EntryFunctionCall { amounts, } => aptos_account_batch_transfer_coins(coin_type, recipients, amounts), AptosAccountCreateAccount { auth_key } => aptos_account_create_account(auth_key), + AptosAccountFungibleTransferOnly { to, amount } => { + aptos_account_fungible_transfer_only(to, amount) + }, AptosAccountSetAllowDirectCoinTransfers { allow } => { aptos_account_set_allow_direct_coin_transfers(allow) }, @@ -1268,40 +1466,16 @@ impl EntryFunctionCall { proposal_id, should_pass, } => aptos_governance_vote(stake_pool, proposal_id, should_pass), - AtomicBridgeCounterpartyAbortBridgeTransfer { bridge_transfer_id } => { - atomic_bridge_counterparty_abort_bridge_transfer(bridge_transfer_id) - }, - AtomicBridgeInitiatorCompleteBridgeTransfer { - bridge_transfer_id, - pre_image, - } => atomic_bridge_initiator_complete_bridge_transfer(bridge_transfer_id, pre_image), - AtomicBridgeInitiatorInitiateBridgeTransfer { - recipient, - hash_lock, - amount, - } => atomic_bridge_initiator_initiate_bridge_transfer(recipient, hash_lock, amount), - AtomicBridgeCounterpartyLockBridgeTransferAssets { - initiator, - bridge_transfer_id, - hash_lock, - recipient, - amount, - } => atomic_bridge_counterparty_lock_bridge_transfer_assets( - initiator, - bridge_transfer_id, - hash_lock, - recipient, - amount, - ), - AtomicBridgeInitiatorRefundBridgeTransfer { bridge_transfer_id } => { - atomic_bridge_initiator_refund_bridge_transfer(bridge_transfer_id) - }, CodePublishPackageTxn { metadata_serialized, code, } => code_publish_package_txn(metadata_serialized, code), CoinCreateCoinConversionMap {} => coin_create_coin_conversion_map(), CoinCreatePairing { coin_type } => coin_create_pairing(coin_type), + CoinMigrateCoinStoreToFungibleStore { + coin_type, + accounts, + } => coin_migrate_coin_store_to_fungible_store(coin_type, accounts), CoinMigrateToFungibleStore { coin_type } => coin_migrate_to_fungible_store(coin_type), CoinTransfer { coin_type, @@ -1362,8 +1536,8 @@ impl EntryFunctionCall { DelegationPoolSetBeneficiaryForOperator { new_beneficiary } => { delegation_pool_set_beneficiary_for_operator(new_beneficiary) }, - DelegationPoolSetDelegatedVoter { new_voter } => { - delegation_pool_set_delegated_voter(new_voter) + DelegationPoolSetDelegatedVoter { _new_voter } => { + delegation_pool_set_delegated_voter(_new_voter) }, DelegationPoolSetOperator { new_operator } => { delegation_pool_set_operator(new_operator) @@ -1388,7 +1562,15 @@ impl EntryFunctionCall { pool_address, amount, } => delegation_pool_withdraw(pool_address, amount), + JwksUpdateFederatedJwkSet { + iss, + kid_vec, + alg_vec, + e_vec, + n_vec, + } => jwks_update_federated_jwk_set(iss, kid_vec, alg_vec, e_vec, n_vec), ManagedCoinBurn { coin_type, amount } => managed_coin_burn(coin_type, amount), + ManagedCoinDestroyCaps { coin_type } => managed_coin_destroy_caps(coin_type), ManagedCoinInitialize { coin_type, name, @@ -1466,6 +1648,28 @@ impl EntryFunctionCall { metadata_keys, metadata_values, ), + 
MultisigAccountCreateWithExistingAccountAndRevokeAuthKeyCall { + owners, + num_signatures_required, + metadata_keys, + metadata_values, + } => multisig_account_create_with_existing_account_and_revoke_auth_key_call( + owners, + num_signatures_required, + metadata_keys, + metadata_values, + ), + MultisigAccountCreateWithExistingAccountCall { + owners, + num_signatures_required, + metadata_keys, + metadata_values, + } => multisig_account_create_with_existing_account_call( + owners, + num_signatures_required, + metadata_keys, + metadata_values, + ), MultisigAccountCreateWithOwners { additional_owners, num_signatures_required, @@ -1552,36 +1756,17 @@ impl EntryFunctionCall { sequence_number, approved, } => multisig_account_vote_transanction(multisig_account, sequence_number, approved), - NativeBridgeCompleteBridgeTransfer { - bridge_transfer_id, - initiator, - recipient, - amount, - nonce, - } => native_bridge_complete_bridge_transfer( - bridge_transfer_id, - initiator, - recipient, - amount, - nonce, - ), - NativeBridgeInitiateBridgeTransfer { recipient, amount } => { - native_bridge_initiate_bridge_transfer(recipient, amount) - }, - NativeBridgeUpdateBridgeFee { new_bridge_fee } => { - native_bridge_update_bridge_fee(new_bridge_fee) - }, - NativeBridgeUpdateInsuranceBudgetDivider { - new_insurance_budget_divider, - } => native_bridge_update_insurance_budget_divider(new_insurance_budget_divider), - NativeBridgeUpdateInsuranceFund { new_insurance_fund } => { - native_bridge_update_insurance_fund(new_insurance_fund) - }, + NonceValidationAddNonceBuckets { count } => nonce_validation_add_nonce_buckets(count), + NonceValidationInitializeNonceTable {} => nonce_validation_initialize_nonce_table(), ObjectTransferCall { object, to } => object_transfer_call(object, to), ObjectCodeDeploymentPublish { metadata_serialized, code, } => object_code_deployment_publish(metadata_serialized, code), + PermissionedSignerRevokeAllHandles {} => permissioned_signer_revoke_all_handles(), + PermissionedSignerRevokePermissionStorageAddress { + permissions_storage_addr, + } => permissioned_signer_revoke_permission_storage_address(permissions_storage_addr), ResourceAccountCreateResourceAccount { seed, optional_auth_key, @@ -1964,7 +2149,7 @@ pub fn account_revoke_signer_capability( /// Here is an example attack if we don't ask for the second signature `cap_update_table`: /// Alice has rotated her account `addr_a` to `new_addr_a`. As a result, the following entry is created, to help Alice when recovering her wallet: /// `OriginatingAddress[new_addr_a]` -> `addr_a` -/// Alice has had bad day: her laptop blew up and she needs to reset her account on a new one. +/// Alice has had a bad day: her laptop blew up and she needs to reset her account on a new one. /// (Fortunately, she still has her secret key `new_sk_a` associated with her new address `new_addr_a`, so she can do this.) /// /// But Bob likes to mess with Alice. @@ -2011,6 +2196,9 @@ pub fn account_rotate_authentication_key( /// does not come with a proof-of-knowledge of the underlying SK. Nonetheless, we need this functionality due to /// the introduction of non-standard key algorithms, such as passkeys, which cannot produce proofs-of-knowledge in /// the format expected in `rotate_authentication_key`. +/// +/// If you'd like to followup with updating the `OriginatingAddress` table, you can call +/// `set_originating_address()`. 
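An illustrative, non-authoritative sketch of the two-transaction flow described in the doc comment above: an unproven rotation via `rotate_authentication_key_call`, followed by `set_originating_address` submitted under the new key. It assumes the generated builders in this file are in scope, that the elided generic parameters are byte vectors (`Vec<u8>`), and that `TransactionPayload` lives at the `aptos_types::transaction` path used elsewhere in this crate.

```rust
use aptos_types::transaction::TransactionPayload;

/// Hedged sketch: payloads for the rotate-then-reconcile flow.
/// `new_auth_key` is assumed to be `Vec<u8>` (the generic parameter is
/// elided in this diff rendering).
fn rotation_flow(new_auth_key: Vec<u8>) -> (TransactionPayload, TransactionPayload) {
    // Step 1: unproven key rotation; does NOT touch the OriginatingAddress table.
    let rotate = account_rotate_authentication_key_call(new_auth_key);
    // Step 2: submitted under the authority of the *new* key, this records the
    // mapping in the OriginatingAddress table.
    let reconcile = account_set_originating_address();
    (rotate, reconcile)
}
```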
pub fn account_rotate_authentication_key_call(new_auth_key: Vec) -> TransactionPayload { TransactionPayload::EntryFunction(EntryFunction::new( ModuleId::new( @@ -2026,6 +2214,31 @@ pub fn account_rotate_authentication_key_call(new_auth_key: Vec) -> Transact )) } +/// Private entry function for key rotation that allows the signer to update their authentication key from a given public key. +/// This function will abort if the scheme is not recognized or if new_public_key_bytes is not a valid public key for the given scheme. +/// +/// Note: This function does not update the `OriginatingAddress` table. +pub fn account_rotate_authentication_key_from_public_key( + scheme: u8, + new_public_key_bytes: Vec, +) -> TransactionPayload { + TransactionPayload::EntryFunction(EntryFunction::new( + ModuleId::new( + AccountAddress::new([ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 1, + ]), + ident_str!("account").to_owned(), + ), + ident_str!("rotate_authentication_key_from_public_key").to_owned(), + vec![], + vec![ + bcs::to_bytes(&scheme).unwrap(), + bcs::to_bytes(&new_public_key_bytes).unwrap(), + ], + )) +} + pub fn account_rotate_authentication_key_with_rotation_capability( rotation_cap_offerer_address: AccountAddress, new_scheme: u8, @@ -2051,6 +2264,256 @@ pub fn account_rotate_authentication_key_with_rotation_capability( )) } +/// For the given account, add an entry to `OriginatingAddress` table mapping the account's +/// authentication key to the account's address. +/// +/// Can be used as a followup to `rotate_authentication_key_call()` to reconcile the +/// `OriginatingAddress` table, or to establish a mapping for a new account that has not yet had +/// its authentication key rotated. +/// +/// Aborts if there is already an entry in the `OriginatingAddress` table for the account's +/// authentication key. +/// +/// Kept as a private entry function to ensure that after an unproven rotation via +/// `rotate_authentication_key_call()`, the `OriginatingAddress` table is only updated under the +/// authority of the new authentication key. +pub fn account_set_originating_address() -> TransactionPayload { + TransactionPayload::EntryFunction(EntryFunction::new( + ModuleId::new( + AccountAddress::new([ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 1, + ]), + ident_str!("account").to_owned(), + ), + ident_str!("set_originating_address").to_owned(), + vec![], + vec![], + )) +} + +/// Upserts an ED25519 backup key to an account that has a keyless public key as its original public key by converting the account's authentication key +/// to a multi-key of the original keyless public key and the new backup key that requires 1 signature from either key to authenticate. +/// This function takes a the account's original keyless public key and a ED25519 backup public key and rotates the account's authentication key to a multi-key of +/// the original keyless public key and the new backup key that requires 1 signature from either key to authenticate. +/// +/// Note: This function emits a `KeyRotationToMultiPublicKey` event marking both keys as verified since the keyless public key +/// is the original public key of the account and the new backup key has been validated via verifying the challenge signed by the new backup key. 
+/// +/// # Arguments +/// * `account` - The signer representing the keyless account +/// * `keyless_public_key` - The original keyless public key of the account (wrapped in an AnyPublicKey) +/// * `backup_public_key` - The ED25519 public key to add as a backup +/// * `backup_key_proof` - A signature from the backup key proving ownership +/// +/// # Aborts +/// * If the any of inputs deserialize incorrectly +/// * If the provided public key is not a keyless public key +/// * If the keyless public key is not the original public key of the account +/// * If the backup key proof signature is invalid +/// +/// # Events +/// * Emits a `KeyRotationToMultiPublicKey` event with the new multi-key configuration +pub fn account_upsert_ed25519_backup_key_on_keyless_account( + keyless_public_key: Vec, + backup_public_key: Vec, + backup_key_proof: Vec, +) -> TransactionPayload { + TransactionPayload::EntryFunction(EntryFunction::new( + ModuleId::new( + AccountAddress::new([ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 1, + ]), + ident_str!("account").to_owned(), + ), + ident_str!("upsert_ed25519_backup_key_on_keyless_account").to_owned(), + vec![], + vec![ + bcs::to_bytes(&keyless_public_key).unwrap(), + bcs::to_bytes(&backup_public_key).unwrap(), + bcs::to_bytes(&backup_key_proof).unwrap(), + ], + )) +} + +/// Add dispatchable authentication function that enables account abstraction via this function. +/// Note: it is a private entry function that can only be called directly from transaction. +pub fn account_abstraction_add_authentication_function( + module_address: AccountAddress, + module_name: Vec, + function_name: Vec, +) -> TransactionPayload { + TransactionPayload::EntryFunction(EntryFunction::new( + ModuleId::new( + AccountAddress::new([ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 1, + ]), + ident_str!("account_abstraction").to_owned(), + ), + ident_str!("add_authentication_function").to_owned(), + vec![], + vec![ + bcs::to_bytes(&module_address).unwrap(), + bcs::to_bytes(&module_name).unwrap(), + bcs::to_bytes(&function_name).unwrap(), + ], + )) +} + +pub fn account_abstraction_add_dispatchable_authentication_function( + _module_address: AccountAddress, + _module_name: Vec, + _function_name: Vec, +) -> TransactionPayload { + TransactionPayload::EntryFunction(EntryFunction::new( + ModuleId::new( + AccountAddress::new([ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 1, + ]), + ident_str!("account_abstraction").to_owned(), + ), + ident_str!("add_dispatchable_authentication_function").to_owned(), + vec![], + vec![ + bcs::to_bytes(&_module_address).unwrap(), + bcs::to_bytes(&_module_name).unwrap(), + bcs::to_bytes(&_function_name).unwrap(), + ], + )) +} + +pub fn account_abstraction_initialize() -> TransactionPayload { + TransactionPayload::EntryFunction(EntryFunction::new( + ModuleId::new( + AccountAddress::new([ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 1, + ]), + ident_str!("account_abstraction").to_owned(), + ), + ident_str!("initialize").to_owned(), + vec![], + vec![], + )) +} + +/// Add dispatchable derivable authentication function, that enables account abstraction via this function. +/// This means all accounts within the domain can use it to authenticate, without needing an initialization (unlike non-domain AA). 
+/// dispatchable function needs to verify two things: +/// - that signing_data.derivable_abstract_signature() is a valid signature of signing_data.digest() (just like regular AA) +/// - that signing_data.derivable_abstract_public_key() is correct identity representing the authenticator +/// (missing this step would allow impersonation) +/// +/// Note: This is public entry function, as it requires framework signer, and that can +/// only be obtained as a part of the governance script. +pub fn account_abstraction_register_derivable_authentication_function( + module_address: AccountAddress, + module_name: Vec, + function_name: Vec, +) -> TransactionPayload { + TransactionPayload::EntryFunction(EntryFunction::new( + ModuleId::new( + AccountAddress::new([ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 1, + ]), + ident_str!("account_abstraction").to_owned(), + ), + ident_str!("register_derivable_authentication_function").to_owned(), + vec![], + vec![ + bcs::to_bytes(&module_address).unwrap(), + bcs::to_bytes(&module_name).unwrap(), + bcs::to_bytes(&function_name).unwrap(), + ], + )) +} + +/// Remove dispatchable authentication function that enables account abstraction via this function. +/// dispatchable function needs to verify that signing_data.authenticator() is a valid signature of signing_data.digest(). +/// Note: it is a private entry function that can only be called directly from transaction. +pub fn account_abstraction_remove_authentication_function( + module_address: AccountAddress, + module_name: Vec, + function_name: Vec, +) -> TransactionPayload { + TransactionPayload::EntryFunction(EntryFunction::new( + ModuleId::new( + AccountAddress::new([ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 1, + ]), + ident_str!("account_abstraction").to_owned(), + ), + ident_str!("remove_authentication_function").to_owned(), + vec![], + vec![ + bcs::to_bytes(&module_address).unwrap(), + bcs::to_bytes(&module_name).unwrap(), + bcs::to_bytes(&function_name).unwrap(), + ], + )) +} + +/// Remove dispatchable authenticator so that all dispatchable authentication functions will be removed as well. +/// After calling this function, the account is not abstracted at all. +/// Note: it is a private entry function that can only be called directly from transaction. 
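A hedged sketch of how the account-abstraction builders above might be paired: one payload opts an account into abstraction by pointing it at a dispatchable authentication function, the other removes the authenticator entirely. The module `my_auth::authenticate` is hypothetical, and the `Vec<u8>` argument types and `aptos_types` paths are assumptions (the generic parameters are elided in this diff rendering).

```rust
use aptos_types::{account_address::AccountAddress, transaction::TransactionPayload};

/// Hedged sketch: opt an account into, then back out of, account abstraction.
/// `my_addr::my_auth::authenticate` is a hypothetical authentication function.
fn abstraction_payloads(my_addr: AccountAddress) -> (TransactionPayload, TransactionPayload) {
    // Register the dispatchable authentication function (module and function
    // names are passed as raw bytes in this builder).
    let enable = account_abstraction_add_authentication_function(
        my_addr,
        b"my_auth".to_vec(),
        b"authenticate".to_vec(),
    );
    // Remove the dispatchable authenticator, leaving the account un-abstracted.
    let disable = account_abstraction_remove_authenticator();
    (enable, disable)
}
```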
+pub fn account_abstraction_remove_authenticator() -> TransactionPayload { + TransactionPayload::EntryFunction(EntryFunction::new( + ModuleId::new( + AccountAddress::new([ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 1, + ]), + ident_str!("account_abstraction").to_owned(), + ), + ident_str!("remove_authenticator").to_owned(), + vec![], + vec![], + )) +} + +pub fn account_abstraction_remove_dispatchable_authentication_function( + _module_address: AccountAddress, + _module_name: Vec, + _function_name: Vec, +) -> TransactionPayload { + TransactionPayload::EntryFunction(EntryFunction::new( + ModuleId::new( + AccountAddress::new([ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 1, + ]), + ident_str!("account_abstraction").to_owned(), + ), + ident_str!("remove_dispatchable_authentication_function").to_owned(), + vec![], + vec![ + bcs::to_bytes(&_module_address).unwrap(), + bcs::to_bytes(&_module_name).unwrap(), + bcs::to_bytes(&_function_name).unwrap(), + ], + )) +} + +pub fn account_abstraction_remove_dispatchable_authenticator() -> TransactionPayload { + TransactionPayload::EntryFunction(EntryFunction::new( + ModuleId::new( + AccountAddress::new([ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 1, + ]), + ident_str!("account_abstraction").to_owned(), + ), + ident_str!("remove_dispatchable_authenticator").to_owned(), + vec![], + vec![], + )) +} + /// Batch version of APT transfer. pub fn aptos_account_batch_transfer( recipients: Vec, @@ -2112,6 +2575,27 @@ pub fn aptos_account_create_account(auth_key: AccountAddress) -> TransactionPayl )) } +/// APT Primary Fungible Store specific specialized functions, +/// Utilized internally once migration of APT to FungibleAsset is complete. +/// Convenient function to transfer APT to a recipient account that might not exist. +/// This would create the recipient APT PFS first, which also registers it to receive APT, before transferring. +/// TODO: once migration is complete, rename to just "transfer_only" and make it an entry function (for cheapest way +/// to transfer APT) - if we want to allow APT PFS without account itself +pub fn aptos_account_fungible_transfer_only(to: AccountAddress, amount: u64) -> TransactionPayload { + TransactionPayload::EntryFunction(EntryFunction::new( + ModuleId::new( + AccountAddress::new([ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 1, + ]), + ident_str!("aptos_account").to_owned(), + ), + ident_str!("fungible_transfer_only").to_owned(), + vec![], + vec![bcs::to_bytes(&to).unwrap(), bcs::to_bytes(&amount).unwrap()], + )) +} + /// Set whether `account` can receive direct transfers of coins that they have not explicitly registered to receive. pub fn aptos_account_set_allow_direct_coin_transfers(allow: bool) -> TransactionPayload { TransactionPayload::EntryFunction(EntryFunction::new( @@ -2455,131 +2939,6 @@ pub fn aptos_governance_vote( )) } -/// Aborts a bridge transfer if the time lock has expired. -/// -/// @param caller The signer representing the bridge operator. -/// @param bridge_transfer_id The unique identifier for the bridge transfer. -/// @abort If the caller is not the bridge operator or if the time lock has not expired. 
-pub fn atomic_bridge_counterparty_abort_bridge_transfer( - bridge_transfer_id: Vec, -) -> TransactionPayload { - TransactionPayload::EntryFunction(EntryFunction::new( - ModuleId::new( - AccountAddress::new([ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 1, - ]), - ident_str!("atomic_bridge_counterparty").to_owned(), - ), - ident_str!("abort_bridge_transfer").to_owned(), - vec![], - vec![bcs::to_bytes(&bridge_transfer_id).unwrap()], - )) -} - -/// Bridge operator can complete the transfer -pub fn atomic_bridge_initiator_complete_bridge_transfer( - bridge_transfer_id: Vec, - pre_image: Vec, -) -> TransactionPayload { - TransactionPayload::EntryFunction(EntryFunction::new( - ModuleId::new( - AccountAddress::new([ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 1, - ]), - ident_str!("atomic_bridge_initiator").to_owned(), - ), - ident_str!("complete_bridge_transfer").to_owned(), - vec![], - vec![ - bcs::to_bytes(&bridge_transfer_id).unwrap(), - bcs::to_bytes(&pre_image).unwrap(), - ], - )) -} - -/// Initiate a bridge transfer of ETH from Movement to the base layer -/// Anyone can initiate a bridge transfer from the source chain -/// The amount is burnt from the initiator -pub fn atomic_bridge_initiator_initiate_bridge_transfer( - recipient: Vec, - hash_lock: Vec, - amount: u64, -) -> TransactionPayload { - TransactionPayload::EntryFunction(EntryFunction::new( - ModuleId::new( - AccountAddress::new([ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 1, - ]), - ident_str!("atomic_bridge_initiator").to_owned(), - ), - ident_str!("initiate_bridge_transfer").to_owned(), - vec![], - vec![ - bcs::to_bytes(&recipient).unwrap(), - bcs::to_bytes(&hash_lock).unwrap(), - bcs::to_bytes(&amount).unwrap(), - ], - )) -} - -/// Locks assets for a bridge transfer by the initiator. -/// -/// @param caller The signer representing the bridge operator. -/// @param initiator The initiator's Ethereum address as a vector of bytes. -/// @param bridge_transfer_id The unique identifier for the bridge transfer. -/// @param hash_lock The hash lock for securing the transfer. -/// @param time_lock The time lock duration for the transfer. -/// @param recipient The address of the recipient on the Aptos blockchain. -/// @param amount The amount of assets to be locked. -/// @abort If the caller is not the bridge operator. 
-pub fn atomic_bridge_counterparty_lock_bridge_transfer_assets( - initiator: Vec, - bridge_transfer_id: Vec, - hash_lock: Vec, - recipient: AccountAddress, - amount: u64, -) -> TransactionPayload { - TransactionPayload::EntryFunction(EntryFunction::new( - ModuleId::new( - AccountAddress::new([ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 1, - ]), - ident_str!("atomic_bridge_counterparty").to_owned(), - ), - ident_str!("lock_bridge_transfer_assets").to_owned(), - vec![], - vec![ - bcs::to_bytes(&initiator).unwrap(), - bcs::to_bytes(&bridge_transfer_id).unwrap(), - bcs::to_bytes(&hash_lock).unwrap(), - bcs::to_bytes(&recipient).unwrap(), - bcs::to_bytes(&amount).unwrap(), - ], - )) -} - -/// Anyone can refund the transfer on the source chain once time lock has passed -pub fn atomic_bridge_initiator_refund_bridge_transfer( - bridge_transfer_id: Vec, -) -> TransactionPayload { - TransactionPayload::EntryFunction(EntryFunction::new( - ModuleId::new( - AccountAddress::new([ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 1, - ]), - ident_str!("atomic_bridge_initiator").to_owned(), - ), - ident_str!("refund_bridge_transfer").to_owned(), - vec![], - vec![bcs::to_bytes(&bridge_transfer_id).unwrap()], - )) -} - /// Same as `publish_package` but as an entry function which can be called as a transaction. Because /// of current restrictions for txn parameters, the metadata needs to be passed in serialized form. pub fn code_publish_package_txn( @@ -2634,6 +2993,25 @@ pub fn coin_create_pairing(coin_type: TypeTag) -> TransactionPayload { )) } +/// Migrate to fungible store for `CoinType` if not yet. +pub fn coin_migrate_coin_store_to_fungible_store( + coin_type: TypeTag, + accounts: Vec, +) -> TransactionPayload { + TransactionPayload::EntryFunction(EntryFunction::new( + ModuleId::new( + AccountAddress::new([ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 1, + ]), + ident_str!("coin").to_owned(), + ), + ident_str!("migrate_coin_store_to_fungible_store").to_owned(), + vec![coin_type], + vec![bcs::to_bytes(&accounts).unwrap()], + )) +} + /// Voluntarily migrate to fungible store for `CoinType` if not yet. pub fn coin_migrate_to_fungible_store(coin_type: TypeTag) -> TransactionPayload { TransactionPayload::EntryFunction(EntryFunction::new( @@ -2926,8 +3304,8 @@ pub fn delegation_pool_set_beneficiary_for_operator( )) } -/// Allows an owner to change the delegated voter of the underlying stake pool. -pub fn delegation_pool_set_delegated_voter(new_voter: AccountAddress) -> TransactionPayload { +/// Deprecated. Use the partial governance voting flow instead. +pub fn delegation_pool_set_delegated_voter(_new_voter: AccountAddress) -> TransactionPayload { TransactionPayload::EntryFunction(EntryFunction::new( ModuleId::new( AccountAddress::new([ @@ -2938,7 +3316,7 @@ pub fn delegation_pool_set_delegated_voter(new_voter: AccountAddress) -> Transac ), ident_str!("set_delegated_voter").to_owned(), vec![], - vec![bcs::to_bytes(&new_voter).unwrap()], + vec![bcs::to_bytes(&_new_voter).unwrap()], )) } @@ -3064,8 +3442,104 @@ pub fn delegation_pool_withdraw(pool_address: AccountAddress, amount: u64) -> Tr )) } -/// Withdraw an `amount` of coin `CoinType` from `account` and burn it. -pub fn managed_coin_burn(coin_type: TypeTag, amount: u64) -> TransactionPayload { +/// This can be called to install or update a set of JWKs for a federated OIDC provider. 
This function should +/// be invoked to intially install a set of JWKs or to update a set of JWKs when a keypair is rotated. +/// +/// The `iss` parameter is the value of the `iss` claim on the JWTs that are to be verified by the JWK set. +/// `kid_vec`, `alg_vec`, `e_vec`, `n_vec` are String vectors of the JWK attributes `kid`, `alg`, `e` and `n` respectively. +/// See https://datatracker.ietf.org/doc/html/rfc7517#section-4 for more details about the JWK attributes aforementioned. +/// +/// For the example JWK set snapshot below containing 2 keys for Google found at https://www.googleapis.com/oauth2/v3/certs - +/// ```json +/// { +/// "keys": [ +/// { +/// "alg": "RS256", +/// "use": "sig", +/// "kty": "RSA", +/// "n": "wNHgGSG5B5xOEQNFPW2p_6ZxZbfPoAU5VceBUuNwQWLop0ohW0vpoZLU1tAsq_S9s5iwy27rJw4EZAOGBR9oTRq1Y6Li5pDVJfmzyRNtmWCWndR-bPqhs_dkJU7MbGwcvfLsN9FSHESFrS9sfGtUX-lZfLoGux23TKdYV9EE-H-NDASxrVFUk2GWc3rL6UEMWrMnOqV9-tghybDU3fcRdNTDuXUr9qDYmhmNegYjYu4REGjqeSyIG1tuQxYpOBH-tohtcfGY-oRTS09kgsSS9Q5BRM4qqCkGP28WhlSf4ui0-norS0gKMMI1P_ZAGEsLn9p2TlYMpewvIuhjJs1thw", +/// "kid": "d7b939771a7800c413f90051012d975981916d71", +/// "e": "AQAB" +/// }, +/// { +/// "kty": "RSA", +/// "kid": "b2620d5e7f132b52afe8875cdf3776c064249d04", +/// "alg": "RS256", +/// "n": "pi22xDdK2fz5gclIbDIGghLDYiRO56eW2GUcboeVlhbAuhuT5mlEYIevkxdPOg5n6qICePZiQSxkwcYMIZyLkZhSJ2d2M6Szx2gDtnAmee6o_tWdroKu0DjqwG8pZU693oLaIjLku3IK20lTs6-2TeH-pUYMjEqiFMhn-hb7wnvH_FuPTjgz9i0rEdw_Hf3Wk6CMypaUHi31y6twrMWq1jEbdQNl50EwH-RQmQ9bs3Wm9V9t-2-_Jzg3AT0Ny4zEDU7WXgN2DevM8_FVje4IgztNy29XUkeUctHsr-431_Iu23JIy6U4Kxn36X3RlVUKEkOMpkDD3kd81JPW4Ger_w", +/// "e": "AQAB", +/// "use": "sig" +/// } +/// ] +/// } +/// ``` +/// +/// We can call update_federated_jwk_set for Google's `iss` - "https://accounts.google.com" and for each vector +/// argument `kid_vec`, `alg_vec`, `e_vec`, `n_vec`, we set in index 0 the corresponding attribute in the first JWK and we set in index 1 +/// the corresponding attribute in the second JWK as shown below. +/// +/// ```move +/// use std::string::utf8; +/// aptos_framework::jwks::update_federated_jwk_set( +/// jwk_owner, +/// b"https://accounts.google.com", +/// vector[utf8(b"d7b939771a7800c413f90051012d975981916d71"), utf8(b"b2620d5e7f132b52afe8875cdf3776c064249d04")], +/// vector[utf8(b"RS256"), utf8(b"RS256")], +/// vector[utf8(b"AQAB"), utf8(b"AQAB")], +/// vector[ +/// utf8(b"wNHgGSG5B5xOEQNFPW2p_6ZxZbfPoAU5VceBUuNwQWLop0ohW0vpoZLU1tAsq_S9s5iwy27rJw4EZAOGBR9oTRq1Y6Li5pDVJfmzyRNtmWCWndR-bPqhs_dkJU7MbGwcvfLsN9FSHESFrS9sfGtUX-lZfLoGux23TKdYV9EE-H-NDASxrVFUk2GWc3rL6UEMWrMnOqV9-tghybDU3fcRdNTDuXUr9qDYmhmNegYjYu4REGjqeSyIG1tuQxYpOBH-tohtcfGY-oRTS09kgsSS9Q5BRM4qqCkGP28WhlSf4ui0-norS0gKMMI1P_ZAGEsLn9p2TlYMpewvIuhjJs1thw"), +/// utf8(b"pi22xDdK2fz5gclIbDIGghLDYiRO56eW2GUcboeVlhbAuhuT5mlEYIevkxdPOg5n6qICePZiQSxkwcYMIZyLkZhSJ2d2M6Szx2gDtnAmee6o_tWdroKu0DjqwG8pZU693oLaIjLku3IK20lTs6-2TeH-pUYMjEqiFMhn-hb7wnvH_FuPTjgz9i0rEdw_Hf3Wk6CMypaUHi31y6twrMWq1jEbdQNl50EwH-RQmQ9bs3Wm9V9t-2-_Jzg3AT0Ny4zEDU7WXgN2DevM8_FVje4IgztNy29XUkeUctHsr-431_Iu23JIy6U4Kxn36X3RlVUKEkOMpkDD3kd81JPW4Ger_w") +/// ] +/// ) +/// ``` +/// +/// See AIP-96 for more details about federated keyless - https://github.com/aptos-foundation/AIPs/blob/main/aips/aip-96.md +/// +/// NOTE: Currently only RSA keys are supported. 
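As a Rust counterpart to the Move snippet in the doc comment above, here is a hedged sketch that feeds the same Google example through the generated builder defined just below. The argument types are assumed to be byte vectors (`Vec<u8>` / `Vec<Vec<u8>>`, since the generic parameters are elided in this diff rendering), and the RSA moduli are truncated for brevity.

```rust
/// Hedged sketch: federated-JWK update payload for the Google example above.
fn google_federated_jwk_update() -> aptos_types::transaction::TransactionPayload {
    jwks_update_federated_jwk_set(
        b"https://accounts.google.com".to_vec(),
        vec![
            b"d7b939771a7800c413f90051012d975981916d71".to_vec(),
            b"b2620d5e7f132b52afe8875cdf3776c064249d04".to_vec(),
        ],
        vec![b"RS256".to_vec(), b"RS256".to_vec()], // alg_vec
        vec![b"AQAB".to_vec(), b"AQAB".to_vec()],   // e_vec
        vec![b"wNHgGSG5...".to_vec(), b"pi22xDdK...".to_vec()], // n_vec, moduli truncated here
    )
}
```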
+pub fn jwks_update_federated_jwk_set( + iss: Vec, + kid_vec: Vec>, + alg_vec: Vec>, + e_vec: Vec>, + n_vec: Vec>, +) -> TransactionPayload { + TransactionPayload::EntryFunction(EntryFunction::new( + ModuleId::new( + AccountAddress::new([ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 1, + ]), + ident_str!("jwks").to_owned(), + ), + ident_str!("update_federated_jwk_set").to_owned(), + vec![], + vec![ + bcs::to_bytes(&iss).unwrap(), + bcs::to_bytes(&kid_vec).unwrap(), + bcs::to_bytes(&alg_vec).unwrap(), + bcs::to_bytes(&e_vec).unwrap(), + bcs::to_bytes(&n_vec).unwrap(), + ], + )) +} + +/// Withdraw an `amount` of coin `CoinType` from `account` and burn it. +pub fn managed_coin_burn(coin_type: TypeTag, amount: u64) -> TransactionPayload { + TransactionPayload::EntryFunction(EntryFunction::new( + ModuleId::new( + AccountAddress::new([ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 1, + ]), + ident_str!("managed_coin").to_owned(), + ), + ident_str!("burn").to_owned(), + vec![coin_type], + vec![bcs::to_bytes(&amount).unwrap()], + )) +} + +/// Destroys capabilities from the account, so that the user no longer has access to mint or burn. +pub fn managed_coin_destroy_caps(coin_type: TypeTag) -> TransactionPayload { TransactionPayload::EntryFunction(EntryFunction::new( ModuleId::new( AccountAddress::new([ @@ -3074,9 +3548,9 @@ pub fn managed_coin_burn(coin_type: TypeTag, amount: u64) -> TransactionPayload ]), ident_str!("managed_coin").to_owned(), ), - ident_str!("burn").to_owned(), + ident_str!("destroy_caps").to_owned(), vec![coin_type], - vec![bcs::to_bytes(&amount).unwrap()], + vec![], )) } @@ -3379,6 +3853,69 @@ pub fn multisig_account_create_with_existing_account_and_revoke_auth_key( )) } +/// Private entry function that creates a new multisig account on top of an existing account and immediately rotate +/// the origin auth key to 0x0. +/// +/// Note: If the original account is a resource account, this does not revoke all control over it as if any +/// SignerCapability of the resource account still exists, it can still be used to generate the signer for the +/// account. +pub fn multisig_account_create_with_existing_account_and_revoke_auth_key_call( + owners: Vec, + num_signatures_required: u64, + metadata_keys: Vec>, + metadata_values: Vec>, +) -> TransactionPayload { + TransactionPayload::EntryFunction(EntryFunction::new( + ModuleId::new( + AccountAddress::new([ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 1, + ]), + ident_str!("multisig_account").to_owned(), + ), + ident_str!("create_with_existing_account_and_revoke_auth_key_call").to_owned(), + vec![], + vec![ + bcs::to_bytes(&owners).unwrap(), + bcs::to_bytes(&num_signatures_required).unwrap(), + bcs::to_bytes(&metadata_keys).unwrap(), + bcs::to_bytes(&metadata_values).unwrap(), + ], + )) +} + +/// Private entry function that creates a new multisig account on top of an existing account. +/// +/// This offers a migration path for an existing account with any type of auth key. +/// +/// Note that this does not revoke auth key-based control over the account. Owners should separately rotate the auth +/// key after they are fully migrated to the new multisig account. Alternatively, they can call +/// create_with_existing_account_and_revoke_auth_key_call instead. 
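Before the multisig builder itself, a hedged usage sketch of migrating an existing account to a 2-of-3 multisig with `create_with_existing_account_call`. The `Vec<AccountAddress>` owner list matches the dispatch shown earlier in this file; the metadata key/value types are assumed to be byte vectors (the generic parameters are elided in this diff rendering).

```rust
use aptos_types::{account_address::AccountAddress, transaction::TransactionPayload};

/// Hedged sketch: turn an existing account into a 2-of-3 multisig.
/// Note: this variant does not revoke the existing auth key.
fn migrate_to_multisig(owners: Vec<AccountAddress>) -> TransactionPayload {
    multisig_account_create_with_existing_account_call(
        owners,  // e.g. three owner addresses
        2,       // num_signatures_required
        vec![],  // metadata_keys
        vec![],  // metadata_values
    )
}
```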
+pub fn multisig_account_create_with_existing_account_call( + owners: Vec, + num_signatures_required: u64, + metadata_keys: Vec>, + metadata_values: Vec>, +) -> TransactionPayload { + TransactionPayload::EntryFunction(EntryFunction::new( + ModuleId::new( + AccountAddress::new([ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 1, + ]), + ident_str!("multisig_account").to_owned(), + ), + ident_str!("create_with_existing_account_call").to_owned(), + vec![], + vec![ + bcs::to_bytes(&owners).unwrap(), + bcs::to_bytes(&num_signatures_required).unwrap(), + bcs::to_bytes(&metadata_keys).unwrap(), + bcs::to_bytes(&metadata_values).unwrap(), + ], + )) +} + /// Creates a new multisig account with the specified additional owner list and signatures required. /// /// @param additional_owners The owner account who calls this function cannot be in the additional_owners and there @@ -3734,118 +4271,59 @@ pub fn multisig_account_vote_transanction( )) } -/// Completes a bridge transfer on the destination chain. -/// -/// @param caller The signer representing the bridge relayer. -/// @param initiator The initiator's Ethereum address as a vector of bytes. -/// @param bridge_transfer_id The unique identifier for the bridge transfer. -/// @param recipient The address of the recipient on the Aptos blockchain. -/// @param amount The amount of assets to be locked. -/// @param nonce The unique nonce for the transfer. -/// @abort If the caller is not the bridge relayer or the transfer has already been processed. -pub fn native_bridge_complete_bridge_transfer( - bridge_transfer_id: Vec, - initiator: Vec, - recipient: AccountAddress, - amount: u64, - nonce: u64, -) -> TransactionPayload { +pub fn nonce_validation_add_nonce_buckets(count: u64) -> TransactionPayload { TransactionPayload::EntryFunction(EntryFunction::new( ModuleId::new( AccountAddress::new([ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, ]), - ident_str!("native_bridge").to_owned(), + ident_str!("nonce_validation").to_owned(), ), - ident_str!("complete_bridge_transfer").to_owned(), + ident_str!("add_nonce_buckets").to_owned(), vec![], - vec![ - bcs::to_bytes(&bridge_transfer_id).unwrap(), - bcs::to_bytes(&initiator).unwrap(), - bcs::to_bytes(&recipient).unwrap(), - bcs::to_bytes(&amount).unwrap(), - bcs::to_bytes(&nonce).unwrap(), - ], + vec![bcs::to_bytes(&count).unwrap()], )) } -/// Initiate a bridge transfer of MOVE from Movement to Ethereum -/// Anyone can initiate a bridge transfer from the source chain -/// The amount is burnt from the initiator and the module-level nonce is incremented -/// @param initiator The initiator's Ethereum address as a vector of bytes. -/// @param recipient The address of the recipient on the Aptos blockchain. -/// @param amount The amount of assets to be locked. 
-pub fn native_bridge_initiate_bridge_transfer( - recipient: Vec, - amount: u64, -) -> TransactionPayload { +pub fn nonce_validation_initialize_nonce_table() -> TransactionPayload { TransactionPayload::EntryFunction(EntryFunction::new( ModuleId::new( AccountAddress::new([ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, ]), - ident_str!("native_bridge").to_owned(), + ident_str!("nonce_validation").to_owned(), ), - ident_str!("initiate_bridge_transfer").to_owned(), + ident_str!("initialize_nonce_table").to_owned(), vec![], - vec![ - bcs::to_bytes(&recipient).unwrap(), - bcs::to_bytes(&amount).unwrap(), - ], - )) -} - -/// Updates the bridge fee, requiring relayer validation. -/// -/// @param relayer The signer representing the Relayer. -/// @param new_bridge_fee The new bridge fee to be set. -/// @abort If the new bridge fee is the same as the old bridge fee. -pub fn native_bridge_update_bridge_fee(new_bridge_fee: u64) -> TransactionPayload { - TransactionPayload::EntryFunction(EntryFunction::new( - ModuleId::new( - AccountAddress::new([ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 1, - ]), - ident_str!("native_bridge").to_owned(), - ), - ident_str!("update_bridge_fee").to_owned(), vec![], - vec![bcs::to_bytes(&new_bridge_fee).unwrap()], )) } -/// Updates the insurance budget divider, requiring governance validation. -/// -/// @param aptos_framework The signer representing the Aptos framework. -/// @param new_insurance_budget_divider The new insurance budget divider to be set. -/// @abort If the new insurance budget divider is the same as the old insurance budget divider. -pub fn native_bridge_update_insurance_budget_divider( - new_insurance_budget_divider: u64, -) -> TransactionPayload { +/// Entry function that can be used to transfer, if allow_ungated_transfer is set true. +pub fn object_transfer_call(object: AccountAddress, to: AccountAddress) -> TransactionPayload { TransactionPayload::EntryFunction(EntryFunction::new( ModuleId::new( AccountAddress::new([ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, ]), - ident_str!("native_bridge").to_owned(), + ident_str!("object").to_owned(), ), - ident_str!("update_insurance_budget_divider").to_owned(), + ident_str!("transfer_call").to_owned(), vec![], - vec![bcs::to_bytes(&new_insurance_budget_divider).unwrap()], + vec![bcs::to_bytes(&object).unwrap(), bcs::to_bytes(&to).unwrap()], )) } -/// Updates the insurance fund, requiring governance validation. -/// -/// @param aptos_framework The signer representing the Aptos framework. -/// @param new_insurance_fund The new insurance fund to be set. -/// @abort If the new insurance fund is the same as the old insurance fund. -pub fn native_bridge_update_insurance_fund( - new_insurance_fund: AccountAddress, +/// Creates a new object with a unique address derived from the publisher address and the object seed. +/// Publishes the code passed in the function to the newly created object. +/// The caller must provide package metadata describing the package via `metadata_serialized` and +/// the code to be published via `code`. This contains a vector of modules to be deployed on-chain. 
+pub fn object_code_deployment_publish( + metadata_serialized: Vec, + code: Vec>, ) -> TransactionPayload { TransactionPayload::EntryFunction(EntryFunction::new( ModuleId::new( @@ -3853,37 +4331,37 @@ pub fn native_bridge_update_insurance_fund( 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, ]), - ident_str!("native_bridge").to_owned(), + ident_str!("object_code_deployment").to_owned(), ), - ident_str!("update_insurance_fund").to_owned(), + ident_str!("publish").to_owned(), vec![], - vec![bcs::to_bytes(&new_insurance_fund).unwrap()], + vec![ + bcs::to_bytes(&metadata_serialized).unwrap(), + bcs::to_bytes(&code).unwrap(), + ], )) } -/// Entry function that can be used to transfer, if allow_ungated_transfer is set true. -pub fn object_transfer_call(object: AccountAddress, to: AccountAddress) -> TransactionPayload { +/// Revoke all storable permission handle of the signer immediately. +pub fn permissioned_signer_revoke_all_handles() -> TransactionPayload { TransactionPayload::EntryFunction(EntryFunction::new( ModuleId::new( AccountAddress::new([ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, ]), - ident_str!("object").to_owned(), + ident_str!("permissioned_signer").to_owned(), ), - ident_str!("transfer_call").to_owned(), + ident_str!("revoke_all_handles").to_owned(), + vec![], vec![], - vec![bcs::to_bytes(&object).unwrap(), bcs::to_bytes(&to).unwrap()], )) } -/// Creates a new object with a unique address derived from the publisher address and the object seed. -/// Publishes the code passed in the function to the newly created object. -/// The caller must provide package metadata describing the package via `metadata_serialized` and -/// the code to be published via `code`. This contains a vector of modules to be deployed on-chain. -pub fn object_code_deployment_publish( - metadata_serialized: Vec, - code: Vec>, +/// Revoke a specific storable permission handle immediately. This will disallow owner of +/// the storable permission handle to derive signer from it anymore. 
+pub fn permissioned_signer_revoke_permission_storage_address( + permissions_storage_addr: AccountAddress, ) -> TransactionPayload { TransactionPayload::EntryFunction(EntryFunction::new( ModuleId::new( @@ -3891,14 +4369,11 @@ pub fn object_code_deployment_publish( 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, ]), - ident_str!("object_code_deployment").to_owned(), + ident_str!("permissioned_signer").to_owned(), ), - ident_str!("publish").to_owned(), + ident_str!("revoke_permission_storage_address").to_owned(), vec![], - vec![ - bcs::to_bytes(&metadata_serialized).unwrap(), - bcs::to_bytes(&code).unwrap(), - ], + vec![bcs::to_bytes(&permissions_storage_addr).unwrap()], )) } @@ -5096,59 +5571,210 @@ mod decoder { } } - pub fn account_revoke_signer_capability( + pub fn account_revoke_signer_capability( + payload: &TransactionPayload, + ) -> Option { + if let TransactionPayload::EntryFunction(script) = payload { + Some(EntryFunctionCall::AccountRevokeSignerCapability { + to_be_revoked_address: bcs::from_bytes(script.args().get(0)?).ok()?, + }) + } else { + None + } + } + + pub fn account_rotate_authentication_key( + payload: &TransactionPayload, + ) -> Option { + if let TransactionPayload::EntryFunction(script) = payload { + Some(EntryFunctionCall::AccountRotateAuthenticationKey { + from_scheme: bcs::from_bytes(script.args().get(0)?).ok()?, + from_public_key_bytes: bcs::from_bytes(script.args().get(1)?).ok()?, + to_scheme: bcs::from_bytes(script.args().get(2)?).ok()?, + to_public_key_bytes: bcs::from_bytes(script.args().get(3)?).ok()?, + cap_rotate_key: bcs::from_bytes(script.args().get(4)?).ok()?, + cap_update_table: bcs::from_bytes(script.args().get(5)?).ok()?, + }) + } else { + None + } + } + + pub fn account_rotate_authentication_key_call( + payload: &TransactionPayload, + ) -> Option { + if let TransactionPayload::EntryFunction(script) = payload { + Some(EntryFunctionCall::AccountRotateAuthenticationKeyCall { + new_auth_key: bcs::from_bytes(script.args().get(0)?).ok()?, + }) + } else { + None + } + } + + pub fn account_rotate_authentication_key_from_public_key( + payload: &TransactionPayload, + ) -> Option { + if let TransactionPayload::EntryFunction(script) = payload { + Some( + EntryFunctionCall::AccountRotateAuthenticationKeyFromPublicKey { + scheme: bcs::from_bytes(script.args().get(0)?).ok()?, + new_public_key_bytes: bcs::from_bytes(script.args().get(1)?).ok()?, + }, + ) + } else { + None + } + } + + pub fn account_rotate_authentication_key_with_rotation_capability( + payload: &TransactionPayload, + ) -> Option { + if let TransactionPayload::EntryFunction(script) = payload { + Some( + EntryFunctionCall::AccountRotateAuthenticationKeyWithRotationCapability { + rotation_cap_offerer_address: bcs::from_bytes(script.args().get(0)?).ok()?, + new_scheme: bcs::from_bytes(script.args().get(1)?).ok()?, + new_public_key_bytes: bcs::from_bytes(script.args().get(2)?).ok()?, + cap_update_table: bcs::from_bytes(script.args().get(3)?).ok()?, + }, + ) + } else { + None + } + } + + pub fn account_set_originating_address( + payload: &TransactionPayload, + ) -> Option { + if let TransactionPayload::EntryFunction(_script) = payload { + Some(EntryFunctionCall::AccountSetOriginatingAddress {}) + } else { + None + } + } + + pub fn account_upsert_ed25519_backup_key_on_keyless_account( + payload: &TransactionPayload, + ) -> Option { + if let TransactionPayload::EntryFunction(script) = payload { + Some( + 
EntryFunctionCall::AccountUpsertEd25519BackupKeyOnKeylessAccount { + keyless_public_key: bcs::from_bytes(script.args().get(0)?).ok()?, + backup_public_key: bcs::from_bytes(script.args().get(1)?).ok()?, + backup_key_proof: bcs::from_bytes(script.args().get(2)?).ok()?, + }, + ) + } else { + None + } + } + + pub fn account_abstraction_add_authentication_function( + payload: &TransactionPayload, + ) -> Option { + if let TransactionPayload::EntryFunction(script) = payload { + Some( + EntryFunctionCall::AccountAbstractionAddAuthenticationFunction { + module_address: bcs::from_bytes(script.args().get(0)?).ok()?, + module_name: bcs::from_bytes(script.args().get(1)?).ok()?, + function_name: bcs::from_bytes(script.args().get(2)?).ok()?, + }, + ) + } else { + None + } + } + + pub fn account_abstraction_add_dispatchable_authentication_function( + payload: &TransactionPayload, + ) -> Option { + if let TransactionPayload::EntryFunction(script) = payload { + Some( + EntryFunctionCall::AccountAbstractionAddDispatchableAuthenticationFunction { + _module_address: bcs::from_bytes(script.args().get(0)?).ok()?, + _module_name: bcs::from_bytes(script.args().get(1)?).ok()?, + _function_name: bcs::from_bytes(script.args().get(2)?).ok()?, + }, + ) + } else { + None + } + } + + pub fn account_abstraction_initialize( + payload: &TransactionPayload, + ) -> Option { + if let TransactionPayload::EntryFunction(_script) = payload { + Some(EntryFunctionCall::AccountAbstractionInitialize {}) + } else { + None + } + } + + pub fn account_abstraction_register_derivable_authentication_function( + payload: &TransactionPayload, + ) -> Option { + if let TransactionPayload::EntryFunction(script) = payload { + Some( + EntryFunctionCall::AccountAbstractionRegisterDerivableAuthenticationFunction { + module_address: bcs::from_bytes(script.args().get(0)?).ok()?, + module_name: bcs::from_bytes(script.args().get(1)?).ok()?, + function_name: bcs::from_bytes(script.args().get(2)?).ok()?, + }, + ) + } else { + None + } + } + + pub fn account_abstraction_remove_authentication_function( payload: &TransactionPayload, ) -> Option { if let TransactionPayload::EntryFunction(script) = payload { - Some(EntryFunctionCall::AccountRevokeSignerCapability { - to_be_revoked_address: bcs::from_bytes(script.args().get(0)?).ok()?, - }) + Some( + EntryFunctionCall::AccountAbstractionRemoveAuthenticationFunction { + module_address: bcs::from_bytes(script.args().get(0)?).ok()?, + module_name: bcs::from_bytes(script.args().get(1)?).ok()?, + function_name: bcs::from_bytes(script.args().get(2)?).ok()?, + }, + ) } else { None } } - pub fn account_rotate_authentication_key( + pub fn account_abstraction_remove_authenticator( payload: &TransactionPayload, ) -> Option { - if let TransactionPayload::EntryFunction(script) = payload { - Some(EntryFunctionCall::AccountRotateAuthenticationKey { - from_scheme: bcs::from_bytes(script.args().get(0)?).ok()?, - from_public_key_bytes: bcs::from_bytes(script.args().get(1)?).ok()?, - to_scheme: bcs::from_bytes(script.args().get(2)?).ok()?, - to_public_key_bytes: bcs::from_bytes(script.args().get(3)?).ok()?, - cap_rotate_key: bcs::from_bytes(script.args().get(4)?).ok()?, - cap_update_table: bcs::from_bytes(script.args().get(5)?).ok()?, - }) + if let TransactionPayload::EntryFunction(_script) = payload { + Some(EntryFunctionCall::AccountAbstractionRemoveAuthenticator {}) } else { None } } - pub fn account_rotate_authentication_key_call( + pub fn account_abstraction_remove_dispatchable_authentication_function( payload: 
&TransactionPayload, ) -> Option { if let TransactionPayload::EntryFunction(script) = payload { - Some(EntryFunctionCall::AccountRotateAuthenticationKeyCall { - new_auth_key: bcs::from_bytes(script.args().get(0)?).ok()?, - }) + Some( + EntryFunctionCall::AccountAbstractionRemoveDispatchableAuthenticationFunction { + _module_address: bcs::from_bytes(script.args().get(0)?).ok()?, + _module_name: bcs::from_bytes(script.args().get(1)?).ok()?, + _function_name: bcs::from_bytes(script.args().get(2)?).ok()?, + }, + ) } else { None } } - pub fn account_rotate_authentication_key_with_rotation_capability( + pub fn account_abstraction_remove_dispatchable_authenticator( payload: &TransactionPayload, ) -> Option { - if let TransactionPayload::EntryFunction(script) = payload { - Some( - EntryFunctionCall::AccountRotateAuthenticationKeyWithRotationCapability { - rotation_cap_offerer_address: bcs::from_bytes(script.args().get(0)?).ok()?, - new_scheme: bcs::from_bytes(script.args().get(1)?).ok()?, - new_public_key_bytes: bcs::from_bytes(script.args().get(2)?).ok()?, - cap_update_table: bcs::from_bytes(script.args().get(3)?).ok()?, - }, - ) + if let TransactionPayload::EntryFunction(_script) = payload { + Some(EntryFunctionCall::AccountAbstractionRemoveDispatchableAuthenticator {}) } else { None } @@ -5189,6 +5815,19 @@ mod decoder { } } + pub fn aptos_account_fungible_transfer_only( + payload: &TransactionPayload, + ) -> Option { + if let TransactionPayload::EntryFunction(script) = payload { + Some(EntryFunctionCall::AptosAccountFungibleTransferOnly { + to: bcs::from_bytes(script.args().get(0)?).ok()?, + amount: bcs::from_bytes(script.args().get(1)?).ok()?, + }) + } else { + None + } + } + pub fn aptos_account_set_allow_direct_coin_transfers( payload: &TransactionPayload, ) -> Option { @@ -5384,83 +6023,6 @@ mod decoder { } } - pub fn atomic_bridge_counterparty_abort_bridge_transfer( - payload: &TransactionPayload, - ) -> Option { - if let TransactionPayload::EntryFunction(script) = payload { - Some( - EntryFunctionCall::AtomicBridgeCounterpartyAbortBridgeTransfer { - bridge_transfer_id: bcs::from_bytes(script.args().get(0)?).ok()?, - }, - ) - } else { - None - } - } - - pub fn atomic_bridge_initiator_complete_bridge_transfer( - payload: &TransactionPayload, - ) -> Option { - if let TransactionPayload::EntryFunction(script) = payload { - Some( - EntryFunctionCall::AtomicBridgeInitiatorCompleteBridgeTransfer { - bridge_transfer_id: bcs::from_bytes(script.args().get(0)?).ok()?, - pre_image: bcs::from_bytes(script.args().get(1)?).ok()?, - }, - ) - } else { - None - } - } - - pub fn atomic_bridge_initiator_initiate_bridge_transfer( - payload: &TransactionPayload, - ) -> Option { - if let TransactionPayload::EntryFunction(script) = payload { - Some( - EntryFunctionCall::AtomicBridgeInitiatorInitiateBridgeTransfer { - recipient: bcs::from_bytes(script.args().get(0)?).ok()?, - hash_lock: bcs::from_bytes(script.args().get(1)?).ok()?, - amount: bcs::from_bytes(script.args().get(2)?).ok()?, - }, - ) - } else { - None - } - } - - pub fn atomic_bridge_counterparty_lock_bridge_transfer_assets( - payload: &TransactionPayload, - ) -> Option { - if let TransactionPayload::EntryFunction(script) = payload { - Some( - EntryFunctionCall::AtomicBridgeCounterpartyLockBridgeTransferAssets { - initiator: bcs::from_bytes(script.args().get(0)?).ok()?, - bridge_transfer_id: bcs::from_bytes(script.args().get(1)?).ok()?, - hash_lock: bcs::from_bytes(script.args().get(2)?).ok()?, - recipient: 
bcs::from_bytes(script.args().get(3)?).ok()?, - amount: bcs::from_bytes(script.args().get(4)?).ok()?, - }, - ) - } else { - None - } - } - - pub fn atomic_bridge_initiator_refund_bridge_transfer( - payload: &TransactionPayload, - ) -> Option { - if let TransactionPayload::EntryFunction(script) = payload { - Some( - EntryFunctionCall::AtomicBridgeInitiatorRefundBridgeTransfer { - bridge_transfer_id: bcs::from_bytes(script.args().get(0)?).ok()?, - }, - ) - } else { - None - } - } - pub fn code_publish_package_txn(payload: &TransactionPayload) -> Option { if let TransactionPayload::EntryFunction(script) = payload { Some(EntryFunctionCall::CodePublishPackageTxn { @@ -5492,6 +6054,19 @@ mod decoder { } } + pub fn coin_migrate_coin_store_to_fungible_store( + payload: &TransactionPayload, + ) -> Option { + if let TransactionPayload::EntryFunction(script) = payload { + Some(EntryFunctionCall::CoinMigrateCoinStoreToFungibleStore { + coin_type: script.ty_args().get(0)?.clone(), + accounts: bcs::from_bytes(script.args().get(0)?).ok()?, + }) + } else { + None + } + } + pub fn coin_migrate_to_fungible_store( payload: &TransactionPayload, ) -> Option { @@ -5681,7 +6256,7 @@ mod decoder { ) -> Option { if let TransactionPayload::EntryFunction(script) = payload { Some(EntryFunctionCall::DelegationPoolSetDelegatedVoter { - new_voter: bcs::from_bytes(script.args().get(0)?).ok()?, + _new_voter: bcs::from_bytes(script.args().get(0)?).ok()?, }) } else { None @@ -5759,6 +6334,22 @@ mod decoder { } } + pub fn jwks_update_federated_jwk_set( + payload: &TransactionPayload, + ) -> Option { + if let TransactionPayload::EntryFunction(script) = payload { + Some(EntryFunctionCall::JwksUpdateFederatedJwkSet { + iss: bcs::from_bytes(script.args().get(0)?).ok()?, + kid_vec: bcs::from_bytes(script.args().get(1)?).ok()?, + alg_vec: bcs::from_bytes(script.args().get(2)?).ok()?, + e_vec: bcs::from_bytes(script.args().get(3)?).ok()?, + n_vec: bcs::from_bytes(script.args().get(4)?).ok()?, + }) + } else { + None + } + } + pub fn managed_coin_burn(payload: &TransactionPayload) -> Option { if let TransactionPayload::EntryFunction(script) = payload { Some(EntryFunctionCall::ManagedCoinBurn { @@ -5770,6 +6361,16 @@ mod decoder { } } + pub fn managed_coin_destroy_caps(payload: &TransactionPayload) -> Option { + if let TransactionPayload::EntryFunction(script) = payload { + Some(EntryFunctionCall::ManagedCoinDestroyCaps { + coin_type: script.ty_args().get(0)?.clone(), + }) + } else { + None + } + } + pub fn managed_coin_initialize(payload: &TransactionPayload) -> Option { if let TransactionPayload::EntryFunction(script) = payload { Some(EntryFunctionCall::ManagedCoinInitialize { @@ -5938,6 +6539,40 @@ mod decoder { } } + pub fn multisig_account_create_with_existing_account_and_revoke_auth_key_call( + payload: &TransactionPayload, + ) -> Option { + if let TransactionPayload::EntryFunction(script) = payload { + Some( + EntryFunctionCall::MultisigAccountCreateWithExistingAccountAndRevokeAuthKeyCall { + owners: bcs::from_bytes(script.args().get(0)?).ok()?, + num_signatures_required: bcs::from_bytes(script.args().get(1)?).ok()?, + metadata_keys: bcs::from_bytes(script.args().get(2)?).ok()?, + metadata_values: bcs::from_bytes(script.args().get(3)?).ok()?, + }, + ) + } else { + None + } + } + + pub fn multisig_account_create_with_existing_account_call( + payload: &TransactionPayload, + ) -> Option { + if let TransactionPayload::EntryFunction(script) = payload { + Some( + EntryFunctionCall::MultisigAccountCreateWithExistingAccountCall { + 
owners: bcs::from_bytes(script.args().get(0)?).ok()?, + num_signatures_required: bcs::from_bytes(script.args().get(1)?).ok()?, + metadata_keys: bcs::from_bytes(script.args().get(2)?).ok()?, + metadata_values: bcs::from_bytes(script.args().get(3)?).ok()?, + }, + ) + } else { + None + } + } + pub fn multisig_account_create_with_owners( payload: &TransactionPayload, ) -> Option { @@ -6142,92 +6777,71 @@ mod decoder { } } - pub fn native_bridge_complete_bridge_transfer( + pub fn nonce_validation_add_nonce_buckets( payload: &TransactionPayload, ) -> Option { if let TransactionPayload::EntryFunction(script) = payload { - Some(EntryFunctionCall::NativeBridgeCompleteBridgeTransfer { - bridge_transfer_id: bcs::from_bytes(script.args().get(0)?).ok()?, - initiator: bcs::from_bytes(script.args().get(1)?).ok()?, - recipient: bcs::from_bytes(script.args().get(2)?).ok()?, - amount: bcs::from_bytes(script.args().get(3)?).ok()?, - nonce: bcs::from_bytes(script.args().get(4)?).ok()?, + Some(EntryFunctionCall::NonceValidationAddNonceBuckets { + count: bcs::from_bytes(script.args().get(0)?).ok()?, }) } else { None } } - pub fn native_bridge_initiate_bridge_transfer( + pub fn nonce_validation_initialize_nonce_table( payload: &TransactionPayload, ) -> Option { - if let TransactionPayload::EntryFunction(script) = payload { - Some(EntryFunctionCall::NativeBridgeInitiateBridgeTransfer { - recipient: bcs::from_bytes(script.args().get(0)?).ok()?, - amount: bcs::from_bytes(script.args().get(1)?).ok()?, - }) + if let TransactionPayload::EntryFunction(_script) = payload { + Some(EntryFunctionCall::NonceValidationInitializeNonceTable {}) } else { None } } - pub fn native_bridge_update_bridge_fee( - payload: &TransactionPayload, - ) -> Option { + pub fn object_transfer_call(payload: &TransactionPayload) -> Option { if let TransactionPayload::EntryFunction(script) = payload { - Some(EntryFunctionCall::NativeBridgeUpdateBridgeFee { - new_bridge_fee: bcs::from_bytes(script.args().get(0)?).ok()?, + Some(EntryFunctionCall::ObjectTransferCall { + object: bcs::from_bytes(script.args().get(0)?).ok()?, + to: bcs::from_bytes(script.args().get(1)?).ok()?, }) } else { None } } - pub fn native_bridge_update_insurance_budget_divider( + pub fn object_code_deployment_publish( payload: &TransactionPayload, ) -> Option { if let TransactionPayload::EntryFunction(script) = payload { - Some( - EntryFunctionCall::NativeBridgeUpdateInsuranceBudgetDivider { - new_insurance_budget_divider: bcs::from_bytes(script.args().get(0)?).ok()?, - }, - ) + Some(EntryFunctionCall::ObjectCodeDeploymentPublish { + metadata_serialized: bcs::from_bytes(script.args().get(0)?).ok()?, + code: bcs::from_bytes(script.args().get(1)?).ok()?, + }) } else { None } } - pub fn native_bridge_update_insurance_fund( + pub fn permissioned_signer_revoke_all_handles( payload: &TransactionPayload, ) -> Option { - if let TransactionPayload::EntryFunction(script) = payload { - Some(EntryFunctionCall::NativeBridgeUpdateInsuranceFund { - new_insurance_fund: bcs::from_bytes(script.args().get(0)?).ok()?, - }) - } else { - None - } - } - - pub fn object_transfer_call(payload: &TransactionPayload) -> Option { - if let TransactionPayload::EntryFunction(script) = payload { - Some(EntryFunctionCall::ObjectTransferCall { - object: bcs::from_bytes(script.args().get(0)?).ok()?, - to: bcs::from_bytes(script.args().get(1)?).ok()?, - }) + if let TransactionPayload::EntryFunction(_script) = payload { + Some(EntryFunctionCall::PermissionedSignerRevokeAllHandles {}) } else { None } } - pub fn 
object_code_deployment_publish( + pub fn permissioned_signer_revoke_permission_storage_address( payload: &TransactionPayload, ) -> Option { if let TransactionPayload::EntryFunction(script) = payload { - Some(EntryFunctionCall::ObjectCodeDeploymentPublish { - metadata_serialized: bcs::from_bytes(script.args().get(0)?).ok()?, - code: bcs::from_bytes(script.args().get(1)?).ok()?, - }) + Some( + EntryFunctionCall::PermissionedSignerRevokePermissionStorageAddress { + permissions_storage_addr: bcs::from_bytes(script.args().get(0)?).ok()?, + }, + ) } else { None } @@ -6948,10 +7562,54 @@ static SCRIPT_FUNCTION_DECODER_MAP: once_cell::sync::Lazy TransactionPayload { - coin_transfer( - aptos_types::utility_coin::APTOS_COIN_TYPE.clone(), - to, - amount, - ) + coin_transfer(AptosCoinType::type_tag(), to, amount) } pub fn publish_module_source(module_name: &str, module_src: &str) -> TransactionPayload { diff --git a/aptos-move/framework/evaluations/README.md b/aptos-move/framework/evaluations/README.md deleted file mode 100644 index 1296c7e315acf..0000000000000 --- a/aptos-move/framework/evaluations/README.md +++ /dev/null @@ -1,5 +0,0 @@ -# Evaluations -This folder contains written evaluations of features within the Aptos Framework, typically with respect to some proposed change. - -## Contents -- **[Aptos Coin Mint Capability](./aptos-coin-mint-capability):** evaluations of the Aptos Coin mint capability. \ No newline at end of file diff --git a/aptos-move/framework/evaluations/aptos-coin-mint-capability/README.md b/aptos-move/framework/evaluations/aptos-coin-mint-capability/README.md deleted file mode 100644 index 04dfe58a20380..0000000000000 --- a/aptos-move/framework/evaluations/aptos-coin-mint-capability/README.md +++ /dev/null @@ -1,5 +0,0 @@ -# Aptos Coin Mint Capability -This directory evaluates various usages of the Aptos Coin mint capability. - -## Contents -- **[Destroying](./destroying):** evaluates the destroying the Aptos Coin mint capability. \ No newline at end of file diff --git a/aptos-move/framework/evaluations/aptos-coin-mint-capability/destroying/README.md b/aptos-move/framework/evaluations/aptos-coin-mint-capability/destroying/README.md deleted file mode 100644 index d2d3f6fb68104..0000000000000 --- a/aptos-move/framework/evaluations/aptos-coin-mint-capability/destroying/README.md +++ /dev/null @@ -1,128 +0,0 @@ -# Destroying the Aptos Coin Mint Capability -We assert that destroying the Aptos Coin mint capability would not cause failures of necessary operations of a chain which has the Governed Gas Pool feature flags enabled and has removed staking rewards. - -We begin with a brief section on the [procedure and intended outcomes](#procedure-and-intended-outcomes) of destroying the Aptos Coin mint capability. - -We then evaluate the means of [reversing this procedure](#reversing-the-procedure) and the potential [side effects](#side-effects) of doing so. - -We then list all identified [call sites](#call-sites) using the Aptos Coin mint capability. - -Finally, we the evaluate the following potential usages of the Aptos Coin mint capability as would affect critical system properties. These are: - -1. **[Transaction `prologue` and `epilogue`](#transaction-epilogue-and-prologue):** we assert that destroying the mint capability would cause failures of the transaction `prologue` and `epilogue` and thus general transaction processing under the Governed Gas Pool feature flags. -2. 
**[Token transfers](#token-transfers):** we assert destroying the mint capability would not cause failures of token transfers. -3. **[FA migration](#fa-migration):** we assert that destroying the mint capability would not cause failures of FA migration. - -## Procedure and intended outcomes -> [!WARNING] -> In general, a user who has access to the `core_resource_account` signer has the ability to make and publish changes to the framework which can remove restrictions on minting, recreate capabilities, etc. However, we maintain that under the Biarritz Model, these are a tolerable risk--particularly as structures and their storage are preserved by the Aptos Move VM inherently and thus can be restored. -> -> Our evaluation thus concerns effectively destroying the mint capability in such a manner that a user who holds the `core_resource_account` signer would need to introduce a new framework to restore it--as opposed to simply running a series of transactions against existing code. This renders exploits costly, but not impossible. - -### Framework changes -Because Aptos Move scripts cannot borrow structs but instead need to call `public` functions, we must update the framework to expose an `aptos_coin::destory_mint_capability_v2` which wraps the existing `public(friend) aptos_coin::destory_mint_capability`. The body of this function would be as follows: - -```rust -public fun destroy_mint_capability_from(account: signer, from: account) acquires Delegations { - system_addresses::assert_aptos_framework(aptos_framework); - let MintCapStore { mint_cap } = move_from(from); - coin::destroy_mint_cap(mint_cap); -} -``` - -Even though `delegate_mint_capability` does not assert that the `core_resource_account` still has the mint capability, when the capability is copied in the [`claim_mint_capability`](https://github.com/movementlabsxyz/aptos-core/blob/aa45303216be96ea30d361ab7eb2e95fb08c2dcb/aptos-move/framework/aptos-framework/sources/aptos_coin.move#L124) function, the borrow would fail. Thus, the resource account both can no longer use the mint capability and cannot delegate it to another account. - -### Script -The script to burn the mint capability would then be as follows: - -```rust -script { - use aptos_framework::aptos_governance; - use aptos_framework::aptos_coin; - - fun main(core_resources: &signer) { - - let core_signer = aptos_governance::get_signer_testnet_only(core_resources, @0x1); - - let framework_signer = &core_signer; - - // for core signer - aptos_coin::destroy_mint_capability_from(framework_signer, @0x1); - - // for other signers - aptos_governance::destroy_mint_capability_from(framework_signer, an_account); - aptos_governance::destroy_mint_capability_from(framework_signer, another_account); - - } -} -``` - -## Reversing the procedure -To reverse the procedure, a user would need to introduce a new version of the framework which exposes a `create_mint_capability` function. This function would mimic the initialization procedure and look as follows: - -```rust -public fun create_mint_capability_v2(account: signer, to: address) { - system_addresses::assert_aptos_framework(aptos_framework); - let mint_cap = coin::create_mint_cap(); - let mint_cap_store = MintCapStore { mint_cap }; - move_to(to, mint_cap_store); -} -``` - -In a subsequent script, the user would then call this function to recreate the mint capability and use it as needed. - -## Call sites -Call sites for the Aptos Coin mint were identified in the follow ways: - -1. 
**Relevant `.mint_cap` borrows:** we searched for borrows of the `AptosCoinCapabilites` struct that used the `mint_cap` member field. -2. **`aptos_coin` internals:** We searched for direct usages of the `mint` capability struct within the `aptos_coin` module (this is the only place that struct can be used directly) and subsequent call sites. - -### Relevant `.mint_cap` borrows -- **[`distribute_rewards`](https://github.com/movementlabsxyz/aptos-core/blob/aa45303216be96ea30d361ab7eb2e95fb08c2dcb/aptos-move/framework/aptos-framework/sources/stake.move#L1648):** is method used to issue rewards to validators from the reward aggregator pool. We assert that this is not called under the GGP feature flag in the block `epilogue` below. -- **[`mint_and_refund`](https://github.com/movementlabsxyz/aptos-core/blob/aa45303216be96ea30d361ab7eb2e95fb08c2dcb/aptos-move/framework/aptos-framework/sources/transaction_fee.move#L268):** is a method used to mint and refund transaction fees. We assert that this is not called under the GGP feature flag in the block `epilogue` below. - -### `aptos_coin` internals -We identified the following methods, all of which are used faucet or test branches of execution: - -1. [`claim_mint_capability`](https://github.com/search?q=repo%3Amovementlabsxyz%2Faptos-core%20claim_mint_capability&type=code) -2. [`delegate_mint_capability`](https://github.com/search?q=repo%3Amovementlabsxyz%2Faptos-core+delegate_mint_capability&type=code) - -## Usages and side effects - -### Prologue and epilogue - -#### Prologue -There are distinct prologues for executing blocks and transactions within blocks in the Aptos Framework. - -##### [`block_prologue`](https://github.com/movementlabsxyz/aptos-core/blob/aa45303216be96ea30d361ab7eb2e95fb08c2dcb/aptos-move/framework/aptos-framework/sources/block.move#L224) - -The [`block_prologue`](https://github.com/movementlabsxyz/aptos-core/blob/aa45303216be96ea30d361ab7eb2e95fb08c2dcb/aptos-move/framework/aptos-framework/sources/block.move#L224) and its DKG variant `block_prologue_ext` both primarily call to [`block_prologue_common`](https://github.com/movementlabsxyz/aptos-core/blob/aa45303216be96ea30d361ab7eb2e95fb08c2dcb/aptos-move/framework/aptos-framework/sources/block.move#L155). - -We did not identify any usages of `.mint_cap` on any of the `block_prologue_common` branches. - -##### Transaction `prologue` -All transactions now call one of the [`*_script_prologue`](https://github.com/movementlabsxyz/aptos-core/blob/aa45303216be96ea30d361ab7eb2e95fb08c2dcb/aptos-move/aptos-vm/src/aptos_vm.rs#L2244) functions, which in turn call [`prologue_common`](https://github.com/movementlabsxyz/aptos-core/blob/aa45303216be96ea30d361ab7eb2e95fb08c2dcb/aptos-move/framework/aptos-framework/sources/transaction_validation.move#L74). - -We did not identify any usages of `.mint_cap` on any of the `prologue_common` branches. - -#### Epilogue -There are distinct epilogues for executing blocks and transactions within blocks in the Aptos Framework. - -##### Block epilogue -The Block Epilogue does not map neatly to a single function. Importantly, it can trigger `reconfiguration::reconfigure`(https://github.com/movementlabsxyz/aptos-core/blob/aa45303216be96ea30d361ab7eb2e95fb08c2dcb/aptos-move/framework/aptos-framework/sources/reconfiguration.move#L107) which calls the following coin invoking methods: - -- `transaction_fee::process_collected_fees` which does not invoke any minting capabilities. 
-- `stake::on_new_epoch` which mints rewards for validators in `distribute_rewards`[https://github.com/movementlabsxyz/aptos-core/blob/aa45303216be96ea30d361ab7eb2e95fb08c2dcb/aptos-move/framework/aptos-framework/sources/stake.move#L1648]. - -To ensure the block epilogue does not `abort` on the `mint` branch, we would need to either set the reward rate to zero or add logic to skip the minting of rewards under a given feature flag. - -##### Transaction [`epilogue`](https://github.com/movementlabsxyz/aptos-core/blob/aa45303216be96ea30d361ab7eb2e95fb08c2dcb/aptos-move/framework/aptos-framework/sources/transaction_validation.move#L262) - -The transaction [`epilogue`](https://github.com/movementlabsxyz/aptos-core/blob/aa45303216be96ea30d361ab7eb2e95fb08c2dcb/aptos-move/framework/aptos-framework/sources/transaction_validation.move#L330) does make a call to `mint_and_refund` in the `mint` branch. However, this is [disabled](https://github.com/movementlabsxyz/aptos-core/blob/aa45303216be96ea30d361ab7eb2e95fb08c2dcb/aptos-move/framework/aptos-framework/sources/transaction_validation.move#L330) when the `ggp` feature flag is set. - -### Token transfers -- [`coin::transfer`](https://github.com/movementlabsxyz/aptos-core/blob/aa45303216be96ea30d361ab7eb2e95fb08c2dcb/aptos-move/framework/aptos-framework/sources/coin.move#L1151) does not invoke the mint capability (note the lack of an `acquires MintCapability` in the function signature and the lack of an argument requesting the capability). -- [`fa::transfer`](https://github.com/movementlabsxyz/aptos-core/blob/aa45303216be96ea30d361ab7eb2e95fb08c2dcb/aptos-move/framework/aptos-framework/sources/fungible_asset.move#L655) does not invoke the mint capability (note the lack of an `acquires MintCapability` in the function signature and the lack of an argument requesting the capability). - -### FA migration -We did not identify any usages of the mint capability on the original coin in the FA migration features, however, minting new representations coin balances is a core feature of the FA migration process. 
diff --git a/aptos-move/framework/move-stdlib/Cargo.toml b/aptos-move/framework/move-stdlib/Cargo.toml index e02f9290530f3..4aa6fd1223f0b 100644 --- a/aptos-move/framework/move-stdlib/Cargo.toml +++ b/aptos-move/framework/move-stdlib/Cargo.toml @@ -13,20 +13,22 @@ publish = false [dependencies] aptos-gas-schedule = { workspace = true } aptos-native-interface = { workspace = true } -move-core-types = { path = "../../../third_party/move/move-core/types" } -move-vm-runtime = { path = "../../../third_party/move/move-vm/runtime" } -move-vm-types = { path = "../../../third_party/move/move-vm/types" } -sha2 = "0.9.3" -sha3 = "0.9.1" -smallvec = "1.6.1" +aptos-types = { workspace = true } +bcs = { workspace = true } +move-core-types = { workspace = true } +move-vm-runtime = { workspace = true } +move-vm-types = { workspace = true } +sha2 = { workspace = true } +sha3 = { workspace = true } +smallvec = { workspace = true } [dev-dependencies] dir-diff = "0.3.2" file_diff = "1.0.0" -move-cli = { path = "../../../third_party/move/tools/move-cli" } -move-package = { path = "../../../third_party/move/tools/move-package" } -move-unit-test = { path = "../../../third_party/move/tools/move-unit-test" } -tempfile = "3.2.0" +move-cli = { workspace = true } +move-package = { workspace = true } +move-unit-test = { workspace = true } +tempfile = { workspace = true } [features] -testing = [] \ No newline at end of file +testing = [] diff --git a/aptos-move/framework/move-stdlib/doc/acl.md b/aptos-move/framework/move-stdlib/doc/acl.md index 5310909afc734..5d8f1a1c56650 100644 --- a/aptos-move/framework/move-stdlib/doc/acl.md +++ b/aptos-move/framework/move-stdlib/doc/acl.md @@ -114,7 +114,7 @@ Return an empty ACL. Add the address to the ACL. -
public fun add(acl: &mut acl::ACL, addr: address)
+
public fun add(self: &mut acl::ACL, addr: address)
 
@@ -123,9 +123,9 @@ Add the address to the ACL. Implementation -
public fun add(acl: &mut ACL, addr: address) {
-    assert!(!vector::contains(&mut acl.list, &addr), error::invalid_argument(ECONTAIN));
-    vector::push_back(&mut acl.list, addr);
+
public fun add(self: &mut ACL, addr: address) {
+    assert!(!self.list.contains(&addr), error::invalid_argument(ECONTAIN));
+    self.list.push_back(addr);
 }
 
@@ -140,7 +140,7 @@ Add the address to the ACL. Remove the address from the ACL. -
public fun remove(acl: &mut acl::ACL, addr: address)
+
public fun remove(self: &mut acl::ACL, addr: address)
 
@@ -149,10 +149,10 @@ Remove the address from the ACL. Implementation -
public fun remove(acl: &mut ACL, addr: address) {
-    let (found, index) = vector::index_of(&mut acl.list, &addr);
+
public fun remove(self: &mut ACL, addr: address) {
+    let (found, index) = self.list.index_of(&addr);
     assert!(found, error::invalid_argument(ENOT_CONTAIN));
-    vector::remove(&mut acl.list, index);
+    self.list.remove(index);
 }
 
@@ -167,7 +167,7 @@ Remove the address from the ACL. Return true iff the ACL contains the address. -
public fun contains(acl: &acl::ACL, addr: address): bool
+
public fun contains(self: &acl::ACL, addr: address): bool
 
@@ -176,8 +176,8 @@ Return true iff the ACL contains the address. Implementation -
public fun contains(acl: &ACL, addr: address): bool {
-    vector::contains(&acl.list, &addr)
+
public fun contains(self: &ACL, addr: address): bool {
+    self.list.contains(&addr)
 }
 
@@ -192,7 +192,7 @@ Return true iff the ACL contains the address. assert! that the ACL has the address. -
public fun assert_contains(acl: &acl::ACL, addr: address)
+
public fun assert_contains(self: &acl::ACL, addr: address)
 
@@ -201,8 +201,8 @@ assert! that the ACL has the address. Implementation -
public fun assert_contains(acl: &ACL, addr: address) {
-    assert!(contains(acl, addr), error::invalid_argument(ENOT_CONTAIN));
+
public fun assert_contains(self: &ACL, addr: address) {
+    assert!(self.contains(addr), error::invalid_argument(ENOT_CONTAIN));
 }
 
@@ -245,8 +245,8 @@ assert! that the ACL has the address. -
fun spec_contains(acl: ACL, addr: address): bool {
-   exists a in acl.list: a == addr
+
fun spec_contains(self: ACL, addr: address): bool {
+   exists a in self.list: a == addr
 }
 
@@ -257,14 +257,14 @@ assert! that the ACL has the address. ### Function `add` -
public fun add(acl: &mut acl::ACL, addr: address)
+
public fun add(self: &mut acl::ACL, addr: address)
 
-
aborts_if spec_contains(acl, addr) with error::INVALID_ARGUMENT;
-ensures spec_contains(acl, addr);
+
aborts_if spec_contains(self, addr) with error::INVALID_ARGUMENT;
+ensures spec_contains(self, addr);
 
@@ -274,14 +274,14 @@ assert! that the ACL has the address. ### Function `remove` -
public fun remove(acl: &mut acl::ACL, addr: address)
+
public fun remove(self: &mut acl::ACL, addr: address)
 
-
aborts_if !spec_contains(acl, addr) with error::INVALID_ARGUMENT;
-ensures !spec_contains(acl, addr);
+
aborts_if !spec_contains(self, addr) with error::INVALID_ARGUMENT;
+ensures !spec_contains(self, addr);
 
@@ -291,13 +291,13 @@ assert! that the ACL has the address. ### Function `contains` -
public fun contains(acl: &acl::ACL, addr: address): bool
+
public fun contains(self: &acl::ACL, addr: address): bool
 
-
ensures result == spec_contains(acl, addr);
+
ensures result == spec_contains(self, addr);
 
@@ -307,13 +307,13 @@ assert! that the ACL has the address. ### Function `assert_contains` -
public fun assert_contains(acl: &acl::ACL, addr: address)
+
public fun assert_contains(self: &acl::ACL, addr: address)
 
-
aborts_if !spec_contains(acl, addr) with error::INVALID_ARGUMENT;
+
aborts_if !spec_contains(self, addr) with error::INVALID_ARGUMENT;
 
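For orientation, the behaviour the `acl` documentation and specs above describe (a duplicate `add` aborts with `ECONTAIN`, removing a missing entry aborts with `ENOT_CONTAIN`) can be sketched off-chain in plain Rust. This is an illustrative analogue, not the Move module; the 32-byte address type and the error strings are placeholders.

```rust
/// Illustrative sketch of the documented `0x1::acl` semantics; not the Move module.
#[derive(Default)]
struct Acl {
    list: Vec<[u8; 32]>,
}

impl Acl {
    /// Aborts (panics) on a duplicate entry, mirroring `ECONTAIN`.
    fn add(&mut self, addr: [u8; 32]) {
        assert!(!self.list.contains(&addr), "ECONTAIN");
        self.list.push(addr);
    }

    /// Aborts (panics) when the entry is absent, mirroring `ENOT_CONTAIN`.
    fn remove(&mut self, addr: [u8; 32]) {
        let index = self.list.iter().position(|a| *a == addr).expect("ENOT_CONTAIN");
        self.list.remove(index);
    }

    fn contains(&self, addr: &[u8; 32]) -> bool {
        self.list.contains(addr)
    }
}

fn main() {
    let mut acl = Acl::default();
    let addr = [1u8; 32];
    acl.add(addr);
    assert!(acl.contains(&addr));
    acl.remove(addr);
    assert!(!acl.contains(&addr));
}
```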
diff --git a/aptos-move/framework/move-stdlib/doc/bcs.md b/aptos-move/framework/move-stdlib/doc/bcs.md index e01e1d89e1a28..954370f80c5c9 100644 --- a/aptos-move/framework/move-stdlib/doc/bcs.md +++ b/aptos-move/framework/move-stdlib/doc/bcs.md @@ -10,10 +10,15 @@ details on BCS. - [Function `to_bytes`](#0x1_bcs_to_bytes) +- [Function `serialized_size`](#0x1_bcs_serialized_size) +- [Function `constant_serialized_size`](#0x1_bcs_constant_serialized_size) - [Specification](#@Specification_0) + - [Function `serialized_size`](#@Specification_0_serialized_size) + - [Function `constant_serialized_size`](#@Specification_0_constant_serialized_size) -
+
use 0x1::option;
+
@@ -21,7 +26,9 @@ details on BCS. ## Function `to_bytes` -Return the binary representation of v in BCS (Binary Canonical Serialization) format +Note: all natives would fail if the MoveValue contains a permissioned signer in it. +Returns the binary representation of v in BCS (Binary Canonical Serialization) format. +Aborts with 0x1c5 error code if serialization fails.
public fun to_bytes<MoveValue>(v: &MoveValue): vector<u8>
@@ -38,6 +45,62 @@ Return the binary representation of v in BCS (Binary Canonical Seri
 
 
 
+
+
+
+
+## Function `serialized_size`
+
+Returns the size of the binary representation of v in BCS (Binary Canonical Serialization) format.
+Aborts with 0x1c5 error code if there is a failure when calculating serialized size.
+
+
+
public fun serialized_size<MoveValue>(v: &MoveValue): u64
+
+ + + +
+Implementation + + +
native public fun serialized_size<MoveValue>(v: &MoveValue): u64;
+
+ + + +
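As a sanity check on the documented behaviour, the Rust `bcs` crate used throughout this tree exposes the same pair of operations off-chain, and the relationship mirrors the spec further down (`result == len(serialize(v))`). A minimal sketch, assuming a `bcs` version that provides `serialized_size`:

```rust
// Sketch using the Rust `bcs` crate, not the Move native.
fn main() -> Result<(), bcs::Error> {
    let value: (u64, Vec<u8>) = (42, vec![1, 2, 3]);
    let bytes = bcs::to_bytes(&value)?;       // full BCS serialization
    let size = bcs::serialized_size(&value)?; // size computed without keeping the bytes
    assert_eq!(size, bytes.len());            // matches `ensures result == len(serialize(v))`
    Ok(())
}
```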
+
+
+
+## Function `constant_serialized_size`
+
+If the type has a known constant (always the same, independent of instance) serialized size
+in BCS (Binary Canonical Serialization) format, returns it, otherwise returns None.
+Aborts with 0x1c5 error code if there is a failure when calculating serialized size.
+
+Note:
+For some types it might not be known that they have constant size, and the function might return None.
+For example, signer appears to have constant size, but its size might change.
+If this function returned Some() for some type before, it is guaranteed to continue returning Some().
+On the other hand, if the function has returned None for some type,
+it might change in the future to return Some() instead, if the size becomes "known".
+
+
public fun constant_serialized_size<MoveValue>(): option::Option<u64>
+
+ + + +
+Implementation + + +
native public fun constant_serialized_size<MoveValue>(): Option<u64>;
+
+ + +
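The distinction the note above draws can be seen with concrete sizes: a `u64` always occupies 8 bytes in BCS, while a vector's size depends on its length (a ULEB128 length prefix plus the elements), so only the former could report a constant size. A small sketch with the Rust `bcs` crate, again assuming `serialized_size` is available:

```rust
// Sketch with the Rust `bcs` crate, not the Move native.
fn main() -> Result<(), bcs::Error> {
    // Constant-size type: every u64 serializes to exactly 8 bytes.
    assert_eq!(bcs::serialized_size(&0u64)?, 8);
    assert_eq!(bcs::serialized_size(&u64::MAX)?, 8);

    // Length-dependent type: ULEB128 length prefix plus one byte per element,
    // so `constant_serialized_size` for `vector<u8>` would have to be None.
    assert_eq!(bcs::serialized_size(&vec![0u8; 1])?, 2);
    assert_eq!(bcs::serialized_size(&vec![0u8; 200])?, 202);
    Ok(())
}
```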
@@ -56,4 +119,37 @@ Native function which is defined in the prover's prelude.
+ + + +### Function `serialized_size` + + +
public fun serialized_size<MoveValue>(v: &MoveValue): u64
+
+ + + + +
pragma opaque;
+ensures result == len(serialize(v));
+
+ + + + + +### Function `constant_serialized_size` + + +
public fun constant_serialized_size<MoveValue>(): option::Option<u64>
+
+ + + + +
pragma opaque;
+
+ + [move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/move-stdlib/doc/bit_vector.md b/aptos-move/framework/move-stdlib/doc/bit_vector.md index 873873de4e484..191befc2a7713 100644 --- a/aptos-move/framework/move-stdlib/doc/bit_vector.md +++ b/aptos-move/framework/move-stdlib/doc/bit_vector.md @@ -14,7 +14,6 @@ - [Function `is_index_set`](#0x1_bit_vector_is_index_set) - [Function `length`](#0x1_bit_vector_length) - [Function `longest_set_sequence_starting_at`](#0x1_bit_vector_longest_set_sequence_starting_at) -- [Function `shift_left_for_verification_only`](#0x1_bit_vector_shift_left_for_verification_only) - [Specification](#@Specification_1) - [Struct `BitVector`](#@Specification_1_BitVector) - [Function `new`](#@Specification_1_new) @@ -23,7 +22,6 @@ - [Function `shift_left`](#@Specification_1_shift_left) - [Function `is_index_set`](#@Specification_1_is_index_set) - [Function `longest_set_sequence_starting_at`](#@Specification_1_longest_set_sequence_starting_at) - - [Function `shift_left_for_verification_only`](#@Specification_1_shift_left_for_verification_only)
@@ -132,8 +130,8 @@ The maximum allowed bitvector size invariant len(bit_field) == counter; }; (counter < length)}) { - vector::push_back(&mut bit_field, false); - counter = counter + 1; + bit_field.push_back(false); + counter += 1; }; spec { assert counter == length; @@ -155,10 +153,10 @@ The maximum allowed bitvector size ## Function `set` -Set the bit at bit_index in the bitvector regardless of its previous state. +Set the bit at bit_index in the self regardless of its previous state. -
public fun set(bitvector: &mut bit_vector::BitVector, bit_index: u64)
+
public fun set(self: &mut bit_vector::BitVector, bit_index: u64)
 
@@ -167,10 +165,9 @@ Set the bit at bit_index in the bitvector regardless o Implementation -
public fun set(bitvector: &mut BitVector, bit_index: u64) {
-    assert!(bit_index < vector::length(&bitvector.bit_field), EINDEX);
-    let x = vector::borrow_mut(&mut bitvector.bit_field, bit_index);
-    *x = true;
+
public fun set(self: &mut BitVector, bit_index: u64) {
+    assert!(bit_index < self.bit_field.length(), EINDEX);
+    self.bit_field[bit_index] = true;
 }
 
@@ -182,10 +179,10 @@ Set the bit at bit_index in the bitvector regardless o ## Function `unset` -Unset the bit at bit_index in the bitvector regardless of its previous state. +Unset the bit at bit_index in the self regardless of its previous state. -
public fun unset(bitvector: &mut bit_vector::BitVector, bit_index: u64)
+
public fun unset(self: &mut bit_vector::BitVector, bit_index: u64)
 
@@ -194,10 +191,9 @@ Unset the bit at bit_index in the bitvector regardless Implementation -
public fun unset(bitvector: &mut BitVector, bit_index: u64) {
-    assert!(bit_index < vector::length(&bitvector.bit_field), EINDEX);
-    let x = vector::borrow_mut(&mut bitvector.bit_field, bit_index);
-    *x = false;
+
public fun unset(self: &mut BitVector, bit_index: u64) {
+    assert!(bit_index < self.bit_field.length(), EINDEX);
+    self.bit_field[bit_index] = false;
 }
 
@@ -209,11 +205,11 @@ Unset the bit at bit_index in the bitvector regardless ## Function `shift_left` -Shift the bitvector left by amount. If amount is greater than the +Shift the self left by amount. If amount is greater than the bitvector's length the bitvector will be zeroed out. -
public fun shift_left(bitvector: &mut bit_vector::BitVector, amount: u64)
+
public fun shift_left(self: &mut bit_vector::BitVector, amount: u64)
 
@@ -222,25 +218,25 @@ bitvector's length the bitvector will be zeroed out. Implementation -
public fun shift_left(bitvector: &mut BitVector, amount: u64) {
-    if (amount >= bitvector.length) {
-        vector::for_each_mut(&mut bitvector.bit_field, |elem| {
+
public fun shift_left(self: &mut BitVector, amount: u64) {
+    if (amount >= self.length) {
+        self.bit_field.for_each_mut(|elem| {
             *elem = false;
         });
     } else {
         let i = amount;
 
-        while (i < bitvector.length) {
-            if (is_index_set(bitvector, i)) set(bitvector, i - amount)
-            else unset(bitvector, i - amount);
-            i = i + 1;
+        while (i < self.length) {
+            if (self.is_index_set(i)) self.set(i - amount)
+            else self.unset(i - amount);
+            i += 1;
         };
 
-        i = bitvector.length - amount;
+        i = self.length - amount;
 
-        while (i < bitvector.length) {
-            unset(bitvector, i);
-            i = i + 1;
+        while (i < self.length) {
+            self.unset(i);
+            i += 1;
         };
     }
 }
@@ -254,11 +250,11 @@ bitvector's length the bitvector will be zeroed out.
 
 ## Function `is_index_set`
 
-Return the value of the bit at bit_index in the bitvector. true
+Return the value of the bit at bit_index in the self. true
 represents "1" and false represents a 0
 
 
-
public fun is_index_set(bitvector: &bit_vector::BitVector, bit_index: u64): bool
+
public fun is_index_set(self: &bit_vector::BitVector, bit_index: u64): bool
 
@@ -267,9 +263,9 @@ represents "1" and false represents a 0 Implementation -
public fun is_index_set(bitvector: &BitVector, bit_index: u64): bool {
-    assert!(bit_index < vector::length(&bitvector.bit_field), EINDEX);
-    *vector::borrow(&bitvector.bit_field, bit_index)
+
public fun is_index_set(self: &BitVector, bit_index: u64): bool {
+    assert!(bit_index < self.bit_field.length(), EINDEX);
+    self.bit_field[bit_index]
 }
 
@@ -284,7 +280,7 @@ represents "1" and false represents a 0 Return the length (number of usable bits) of this bitvector -
public fun length(bitvector: &bit_vector::BitVector): u64
+
public fun length(self: &bit_vector::BitVector): u64
 
@@ -293,8 +289,8 @@ Return the length (number of usable bits) of this bitvector Implementation -
public fun length(bitvector: &BitVector): u64 {
-    vector::length(&bitvector.bit_field)
+
public fun length(self: &BitVector): u64 {
+    self.bit_field.length()
 }
 
@@ -311,7 +307,7 @@ including) start_index in the bitvector. If there is n sequence, then 0 is returned. -
public fun longest_set_sequence_starting_at(bitvector: &bit_vector::BitVector, start_index: u64): u64
+
public fun longest_set_sequence_starting_at(self: &bit_vector::BitVector, start_index: u64): u64
 
@@ -320,23 +316,23 @@ sequence, then 0 is returned. Implementation -
public fun longest_set_sequence_starting_at(bitvector: &BitVector, start_index: u64): u64 {
-    assert!(start_index < bitvector.length, EINDEX);
+
public fun longest_set_sequence_starting_at(self: &BitVector, start_index: u64): u64 {
+    assert!(start_index < self.length, EINDEX);
     let index = start_index;
 
     // Find the greatest index in the vector such that all indices less than it are set.
     while ({
         spec {
             invariant index >= start_index;
-            invariant index == start_index || is_index_set(bitvector, index - 1);
-            invariant index == start_index || index - 1 < vector::length(bitvector.bit_field);
-            invariant forall j in start_index..index: is_index_set(bitvector, j);
-            invariant forall j in start_index..index: j < vector::length(bitvector.bit_field);
+            invariant index == start_index || self.is_index_set(index - 1);
+            invariant index == start_index || index - 1 < len(self.bit_field);
+            invariant forall j in start_index..index: self.is_index_set(j);
+            invariant forall j in start_index..index: j < len(self.bit_field);
         };
-        index < bitvector.length
+        index < self.length
     }) {
-        if (!is_index_set(bitvector, index)) break;
-        index = index + 1;
+        if (!self.is_index_set(index)) break;
+        index += 1;
     };
 
     index - start_index
@@ -345,78 +341,6 @@ sequence, then 0 is returned.
 
 
 
-
-
-
-
-## Function `shift_left_for_verification_only`
-
-
-
-
#[verify_only]
-public fun shift_left_for_verification_only(bitvector: &mut bit_vector::BitVector, amount: u64)
-
- - - -
-Implementation - - -
public fun shift_left_for_verification_only(bitvector: &mut BitVector, amount: u64) {
-    if (amount >= bitvector.length) {
-        let len = vector::length(&bitvector.bit_field);
-        let i = 0;
-        while ({
-            spec {
-                invariant len == bitvector.length;
-                invariant forall k in 0..i: !bitvector.bit_field[k];
-                invariant forall k in i..bitvector.length: bitvector.bit_field[k] == old(bitvector).bit_field[k];
-            };
-            i < len
-        }) {
-            let elem = vector::borrow_mut(&mut bitvector.bit_field, i);
-            *elem = false;
-            i = i + 1;
-        };
-    } else {
-        let i = amount;
-
-        while ({
-            spec {
-                invariant i >= amount;
-                invariant bitvector.length == old(bitvector).length;
-                invariant forall j in amount..i: old(bitvector).bit_field[j] == bitvector.bit_field[j - amount];
-                invariant forall j in (i-amount)..bitvector.length : old(bitvector).bit_field[j] == bitvector.bit_field[j];
-                invariant forall k in 0..i-amount: bitvector.bit_field[k] == old(bitvector).bit_field[k + amount];
-            };
-            i < bitvector.length
-        }) {
-            if (is_index_set(bitvector, i)) set(bitvector, i - amount)
-            else unset(bitvector, i - amount);
-            i = i + 1;
-        };
-
-
-        i = bitvector.length - amount;
-
-        while ({
-            spec {
-                invariant forall j in bitvector.length - amount..i: !bitvector.bit_field[j];
-                invariant forall k in 0..bitvector.length - amount: bitvector.bit_field[k] == old(bitvector).bit_field[k + amount];
-                invariant i >= bitvector.length - amount;
-            };
-            i < bitvector.length
-        }) {
-            unset(bitvector, i);
-            i = i + 1;
-        }
-    }
-}
-
- - -
@@ -492,14 +416,14 @@ sequence, then 0 is returned. ### Function `set` -
public fun set(bitvector: &mut bit_vector::BitVector, bit_index: u64)
+
public fun set(self: &mut bit_vector::BitVector, bit_index: u64)
 
include SetAbortsIf;
-ensures bitvector.bit_field[bit_index];
+ensures self.bit_field[bit_index];
 
@@ -509,9 +433,9 @@ sequence, then 0 is returned.
schema SetAbortsIf {
-    bitvector: BitVector;
+    self: BitVector;
     bit_index: u64;
-    aborts_if bit_index >= length(bitvector) with EINDEX;
+    aborts_if bit_index >= self.length() with EINDEX;
 }
 
@@ -522,14 +446,14 @@ sequence, then 0 is returned. ### Function `unset` -
public fun unset(bitvector: &mut bit_vector::BitVector, bit_index: u64)
+
public fun unset(self: &mut bit_vector::BitVector, bit_index: u64)
 
include UnsetAbortsIf;
-ensures !bitvector.bit_field[bit_index];
+ensures !self.bit_field[bit_index];
 
@@ -539,9 +463,9 @@ sequence, then 0 is returned.
schema UnsetAbortsIf {
-    bitvector: BitVector;
+    self: BitVector;
     bit_index: u64;
-    aborts_if bit_index >= length(bitvector) with EINDEX;
+    aborts_if bit_index >= self.length() with EINDEX;
 }
 
@@ -552,7 +476,7 @@ sequence, then 0 is returned. ### Function `shift_left` -
public fun shift_left(bitvector: &mut bit_vector::BitVector, amount: u64)
+
public fun shift_left(self: &mut bit_vector::BitVector, amount: u64)
 
@@ -568,14 +492,14 @@ sequence, then 0 is returned. ### Function `is_index_set` -
public fun is_index_set(bitvector: &bit_vector::BitVector, bit_index: u64): bool
+
public fun is_index_set(self: &bit_vector::BitVector, bit_index: u64): bool
 
include IsIndexSetAbortsIf;
-ensures result == bitvector.bit_field[bit_index];
+ensures result == self.bit_field[bit_index];
 
@@ -585,9 +509,9 @@ sequence, then 0 is returned.
schema IsIndexSetAbortsIf {
-    bitvector: BitVector;
+    self: BitVector;
     bit_index: u64;
-    aborts_if bit_index >= length(bitvector) with EINDEX;
+    aborts_if bit_index >= self.length() with EINDEX;
 }
 
@@ -597,11 +521,11 @@ sequence, then 0 is returned. -
fun spec_is_index_set(bitvector: BitVector, bit_index: u64): bool {
-   if (bit_index >= length(bitvector)) {
+
fun spec_is_index_set(self: BitVector, bit_index: u64): bool {
+   if (bit_index >= self.length()) {
        false
    } else {
-       bitvector.bit_field[bit_index]
+       self.bit_field[bit_index]
    }
 }
 
@@ -613,36 +537,14 @@ sequence, then 0 is returned. ### Function `longest_set_sequence_starting_at` -
public fun longest_set_sequence_starting_at(bitvector: &bit_vector::BitVector, start_index: u64): u64
-
- - - - -
aborts_if start_index >= bitvector.length;
-ensures forall i in start_index..result: is_index_set(bitvector, i);
-
- - - - - -### Function `shift_left_for_verification_only` - - -
#[verify_only]
-public fun shift_left_for_verification_only(bitvector: &mut bit_vector::BitVector, amount: u64)
+
public fun longest_set_sequence_starting_at(self: &bit_vector::BitVector, start_index: u64): u64
 
-
aborts_if false;
-ensures amount >= bitvector.length ==> (forall k in 0..bitvector.length: !bitvector.bit_field[k]);
-ensures amount < bitvector.length ==>
-    (forall i in bitvector.length - amount..bitvector.length: !bitvector.bit_field[i]);
-ensures amount < bitvector.length ==>
-    (forall i in 0..bitvector.length - amount: bitvector.bit_field[i] == old(bitvector).bit_field[i + amount]);
+
aborts_if start_index >= self.length;
+ensures forall i in start_index..result: self.is_index_set(i);
 
diff --git a/aptos-move/framework/move-stdlib/doc/cmp.md b/aptos-move/framework/move-stdlib/doc/cmp.md new file mode 100644 index 0000000000000..f6f43999bfe93 --- /dev/null +++ b/aptos-move/framework/move-stdlib/doc/cmp.md @@ -0,0 +1,281 @@ + + + +# Module `0x1::cmp` + + + +- [Enum `Ordering`](#0x1_cmp_Ordering) +- [Function `compare`](#0x1_cmp_compare) +- [Function `is_eq`](#0x1_cmp_is_eq) +- [Function `is_ne`](#0x1_cmp_is_ne) +- [Function `is_lt`](#0x1_cmp_is_lt) +- [Function `is_le`](#0x1_cmp_is_le) +- [Function `is_gt`](#0x1_cmp_is_gt) +- [Function `is_ge`](#0x1_cmp_is_ge) +- [Specification](#@Specification_0) + - [Function `compare`](#@Specification_0_compare) + + +
+ + + + + +## Enum `Ordering` + + + +
enum Ordering has copy, drop
+
+ + + +
+Variants + + +
+Less + + +
+Fields + + +
+
+ + +
+ +
+ +
+Equal + + +
+Fields + + +
+
+ + +
+ +
+ +
+Greater + + +
+Fields + + +
+
+ + +
+ +
+ +
+ + + +## Function `compare` + +Compares two values with the natural ordering: +- native types are compared identically to < and other operators +- complex types +- Structs and vectors are compared lexicographically: the first field/element is compared first, +and if equal we proceed to the next. +- enums are compared first by their variant, and if equal they are compared as structs are. + + +
public fun compare<T>(first: &T, second: &T): cmp::Ordering
+
+ + + +
+Implementation + + +
native public fun compare<T>(first: &T, second: &T): Ordering;
+
+ + + +
+ + + +## Function `is_eq` + + + +
public fun is_eq(self: &cmp::Ordering): bool
+
+ + + +
+Implementation + + +
public fun is_eq(self: &Ordering): bool {
+    self is Ordering::Equal
+}
+
+ + + +
+ + + +## Function `is_ne` + + + +
public fun is_ne(self: &cmp::Ordering): bool
+
+ + + +
+Implementation + + +
public fun is_ne(self: &Ordering): bool {
+    !(self is Ordering::Equal)
+}
+
+ + + +
+ + + +## Function `is_lt` + + + +
public fun is_lt(self: &cmp::Ordering): bool
+
+ + + +
+Implementation + + +
public fun is_lt(self: &Ordering): bool {
+    self is Ordering::Less
+}
+
+ + + +
+ + + +## Function `is_le` + + + +
public fun is_le(self: &cmp::Ordering): bool
+
+ + + +
+Implementation + + +
public fun is_le(self: &Ordering): bool {
+    !(self is Ordering::Greater)
+}
+
+ + + +
+ + + +## Function `is_gt` + + + +
public fun is_gt(self: &cmp::Ordering): bool
+
+ + + +
+Implementation + + +
public fun is_gt(self: &Ordering): bool {
+    self is Ordering::Greater
+}
+
+ + + +
+ + + +## Function `is_ge` + + + +
public fun is_ge(self: &cmp::Ordering): bool
+
+ + + +
+Implementation + + +
public fun is_ge(self: &Ordering): bool {
+    !(self is Ordering::Less)
+}
+
+ + + +
+ + + +## Specification + + + + +### Function `compare` + + +
public fun compare<T>(first: &T, second: &T): cmp::Ordering
+
+ + + + +
pragma opaque;
+
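Since `compare` is native and its spec is opaque, its behaviour is easiest to see at a call site. A minimal test-style sketch of the natural ordering described above (editor's illustration, not part of this diff; module name and literals are assumptions):

#[test_only]
module 0x42::cmp_example {
    use std::cmp;

    #[test]
    fun natural_ordering() {
        let (x, y) = (1u64, 2u64);
        // Native types order the same way as the comparison operators.
        assert!(cmp::compare(&x, &y).is_lt(), 0);
        assert!(cmp::compare(&y, &y).is_eq(), 1);
        // Vectors compare lexicographically, element by element.
        let (a, b) = (vector[1u8, 2u8], vector[1u8, 3u8]);
        assert!(cmp::compare(&a, &b).is_lt(), 2);
    }
}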
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/move-stdlib/doc/features.md b/aptos-move/framework/move-stdlib/doc/features.md index 03b8c23d86c3d..1d05c7d33f816 100644 --- a/aptos-move/framework/move-stdlib/doc/features.md +++ b/aptos-move/framework/move-stdlib/doc/features.md @@ -129,14 +129,32 @@ return true. - [Function `default_to_concurrent_fungible_balance_enabled`](#0x1_features_default_to_concurrent_fungible_balance_enabled) - [Function `get_abort_if_multisig_payload_mismatch_feature`](#0x1_features_get_abort_if_multisig_payload_mismatch_feature) - [Function `abort_if_multisig_payload_mismatch_enabled`](#0x1_features_abort_if_multisig_payload_mismatch_enabled) -- [Function `get_atomic_bridge_feature`](#0x1_features_get_atomic_bridge_feature) -- [Function `abort_atomic_bridge_enabled`](#0x1_features_abort_atomic_bridge_enabled) -- [Function `get_native_bridge_feature`](#0x1_features_get_native_bridge_feature) -- [Function `abort_native_bridge_enabled`](#0x1_features_abort_native_bridge_enabled) -- [Function `get_governed_gas_pool_feature`](#0x1_features_get_governed_gas_pool_feature) -- [Function `governed_gas_pool_enabled`](#0x1_features_governed_gas_pool_enabled) -- [Function `get_decommission_core_resources_feature`](#0x1_features_get_decommission_core_resources_feature) -- [Function `get_decommission_core_resources_enabled`](#0x1_features_get_decommission_core_resources_enabled) +- [Function `get_transaction_simulation_enhancement_feature`](#0x1_features_get_transaction_simulation_enhancement_feature) +- [Function `transaction_simulation_enhancement_enabled`](#0x1_features_transaction_simulation_enhancement_enabled) +- [Function `get_collection_owner_feature`](#0x1_features_get_collection_owner_feature) +- [Function `is_collection_owner_enabled`](#0x1_features_is_collection_owner_enabled) +- [Function `get_native_memory_operations_feature`](#0x1_features_get_native_memory_operations_feature) +- [Function `is_native_memory_operations_enabled`](#0x1_features_is_native_memory_operations_enabled) +- [Function `get_permissioned_signer_feature`](#0x1_features_get_permissioned_signer_feature) +- [Function `is_permissioned_signer_enabled`](#0x1_features_is_permissioned_signer_enabled) +- [Function `get_account_abstraction_feature`](#0x1_features_get_account_abstraction_feature) +- [Function `is_account_abstraction_enabled`](#0x1_features_is_account_abstraction_enabled) +- [Function `get_bulletproofs_batch_feature`](#0x1_features_get_bulletproofs_batch_feature) +- [Function `bulletproofs_batch_enabled`](#0x1_features_bulletproofs_batch_enabled) +- [Function `is_derivable_account_abstraction_enabled`](#0x1_features_is_derivable_account_abstraction_enabled) +- [Function `is_domain_account_abstraction_enabled`](#0x1_features_is_domain_account_abstraction_enabled) +- [Function `get_new_accounts_default_to_fa_store_feature`](#0x1_features_get_new_accounts_default_to_fa_store_feature) +- [Function `new_accounts_default_to_fa_store_enabled`](#0x1_features_new_accounts_default_to_fa_store_enabled) +- [Function `get_default_account_resource_feature`](#0x1_features_get_default_account_resource_feature) +- [Function `is_default_account_resource_enabled`](#0x1_features_is_default_account_resource_enabled) +- [Function `get_jwk_consensus_per_key_mode_feature`](#0x1_features_get_jwk_consensus_per_key_mode_feature) +- [Function `is_jwk_consensus_per_key_mode_enabled`](#0x1_features_is_jwk_consensus_per_key_mode_enabled) +- [Function 
`get_orderless_transactions_feature`](#0x1_features_get_orderless_transactions_feature) +- [Function `orderless_transactions_enabled`](#0x1_features_orderless_transactions_enabled) +- [Function `get_calculate_transaction_fee_for_distribution_feature`](#0x1_features_get_calculate_transaction_fee_for_distribution_feature) +- [Function `is_calculate_transaction_fee_for_distribution_enabled`](#0x1_features_is_calculate_transaction_fee_for_distribution_enabled) +- [Function `get_distribute_transaction_fee_feature`](#0x1_features_get_distribute_transaction_fee_feature) +- [Function `is_distribute_transaction_fee_enabled`](#0x1_features_is_distribute_transaction_fee_enabled) - [Function `change_feature_flags`](#0x1_features_change_feature_flags) - [Function `change_feature_flags_internal`](#0x1_features_change_feature_flags_internal) - [Function `change_feature_flags_for_next_epoch`](#0x1_features_change_feature_flags_for_next_epoch) @@ -146,7 +164,6 @@ return true. - [Function `contains`](#0x1_features_contains) - [Function `apply_diff`](#0x1_features_apply_diff) - [Function `ensure_framework_signer`](#0x1_features_ensure_framework_signer) -- [Function `change_feature_flags_for_verification`](#0x1_features_change_feature_flags_for_verification) - [Specification](#@Specification_1) - [Resource `Features`](#@Specification_1_Features) - [Resource `PendingFeatures`](#@Specification_1_PendingFeatures) @@ -154,6 +171,7 @@ return true. - [Function `partial_governance_voting_enabled`](#@Specification_1_partial_governance_voting_enabled) - [Function `module_event_enabled`](#@Specification_1_module_event_enabled) - [Function `abort_if_multisig_payload_mismatch_enabled`](#@Specification_1_abort_if_multisig_payload_mismatch_enabled) + - [Function `is_default_account_resource_enabled`](#@Specification_1_is_default_account_resource_enabled) - [Function `change_feature_flags_internal`](#@Specification_1_change_feature_flags_internal) - [Function `change_feature_flags_for_next_epoch`](#@Specification_1_change_feature_flags_for_next_epoch) - [Function `on_new_epoch`](#@Specification_1_on_new_epoch) @@ -245,6 +263,18 @@ Lifetime: transient + + +Whether the account abstraction is enabled. + +Lifetime: transient + + +
const ACCOUNT_ABSTRACTION: u64 = 85;
+
+ + + @@ -277,17 +307,6 @@ Lifetime: transient - - -Whether the Atomic bridge is available -Lifetime: transient - - -
const ATOMIC_BRIDGE: u64 = 71;
-
- - - Whether the new BLAKE2B-256 hash function native is enabled. @@ -324,6 +343,17 @@ Lifetime: transient + + +Whether the batch Bulletproofs native functions are available. This is needed because of the introduction of a new native function. +Lifetime: transient + + +
const BULLETPROOFS_BATCH_NATIVES: u64 = 87;
+
+ + + Whether the Bulletproofs zero-knowledge range proof module is enabled, and the related native function is @@ -336,6 +366,16 @@ Lifetime: transient + + +Whether to calculate the transaction fee for distribution. + + +
const CALCULATE_TRANSACTION_FEE_FOR_DISTRIBUTION: u64 = 96;
+
+ + + Charge invariant violation error. @@ -371,9 +411,18 @@ Lifetime: transient + + + + +
const COLLECTION_OWNER: u64 = 79;
+
+ + + -Whether gas fees are collected and distributed to the block proposers. +Deprecated feature Lifetime: transient @@ -429,12 +478,12 @@ Lifetime: transient - + Lifetime: transient -
const DECOMMISSION_CORE_RESOURCES: u64 = 222;
+
const DEFAULT_ACCOUNT_RESOURCE: u64 = 91;
 
@@ -483,6 +532,18 @@ Lifetime: transient + + +Whether the account abstraction is enabled. + +Lifetime: transient + + +
const DERIVABLE_ACCOUNT_ABSTRACTION: u64 = 88;
+
+ + + Whether the dispatchable fungible asset standard feature is enabled. @@ -495,6 +556,16 @@ Lifetime: transient + + +Whether to distribute transaction fee to validators. + + +
const DISTRIBUTE_TRANSACTION_FEE: u64 = 97;
+
+ + + @@ -544,6 +615,19 @@ The provided signer has not a framework address. + + +Whether function values are enabled. +Lifetime: transient + +We do not expect use from Move, so for now only for documentation purposes here + + +
const ENABLE_FUNCTION_VALUES: u64 = 89;
+
+ + + @@ -564,24 +648,28 @@ Lifetime: transient - - -Whether the Governed Gas Pool is used to capture gas fees + -Lifetime: permanent +Deprecated by aptos_framework::jwk_consensus_config::JWKConsensusConfig. -
const GOVERNED_GAS_POOL: u64 = 73;
+
const JWK_CONSENSUS: u64 = 49;
 
- + -Deprecated by aptos_framework::jwk_consensus_config::JWKConsensusConfig. +If enabled, JWK consensus should run in per-key mode, where: +- The consensus is for key-level updates +(e.g., "issuer A key 1 should be deleted", "issuer B key 2 should be upserted"); +- transaction type ValidatorTransaction::ObservedJWKUpdate is reused; +- while a key-level update is mostly represented by a new type KeyLevelUpdate locally, +For simplicity, it is represented by type ProviderJWKs (used to represent issuer-level update) +in JWK Consensus messages, in validator transactions, and in Move. -
const JWK_CONSENSUS: u64 = 49;
+
const JWK_CONSENSUS_PER_KEY_MODE: u64 = 92;
 
@@ -699,13 +787,11 @@ Lifetime: transient - + -Whether the Atomic bridge is available -Lifetime: transient -
const NATIVE_BRIDGE: u64 = 72;
+
const NATIVE_MEMORY_OPERATIONS: u64 = 80;
 
@@ -720,6 +806,17 @@ Lifetime: transient + + +Whether new accounts default to the Fungible Asset store. +Lifetime: transient + + +
const NEW_ACCOUNTS_DEFAULT_TO_FA_STORE: u64 = 90;
+
+ + + Whether deploying to objects is enabled. @@ -761,6 +858,17 @@ Lifetime: transient + + +Whether orderless transactions are enabled. +Lifetime: transient + + +
const ORDERLESS_TRANSACTIONS: u64 = 94;
+
+ + + Whether enable paritial governance voting on aptos_governance. @@ -783,6 +891,15 @@ Lifetime: transient + + + + +
const PERMISSIONED_SIGNER: u64 = 84;
+
+
+ + + @@ -919,6 +1036,20 @@ Lifetime: transient + + +Whether the simulation enhancement is enabled. This enables the simulation without an authentication check, +the sponsored transaction simulation when the fee payer is set to 0x0, and the multisig transaction +simulation consistent with the execution. + +Lifetime: transient + + +
const TRANSACTION_SIMULATION_ENHANCEMENT: u64 = 78;
+
+ + + Whether during upgrade compatibility checking, friend functions should be treated similar like @@ -951,6 +1082,19 @@ Lifetime: transient + + +Whether bytecode version v8 is enabled. +Lifetime: transient + +We do not expect use from Move, so for now only for documentation purposes here + + +
const VM_BINARY_FORMAT_V8: u64 = 86;
+
+ + + ## Function `code_dependency_check_enabled` @@ -1141,9 +1285,11 @@ Lifetime: transient ## Function `get_collect_and_distribute_gas_fees_feature` +Deprecated feature -
public fun get_collect_and_distribute_gas_fees_feature(): u64
+
#[deprecated]
+public fun get_collect_and_distribute_gas_fees_feature(): u64
 
@@ -1165,7 +1311,8 @@ Lifetime: transient -
public fun collect_and_distribute_gas_fees(): bool
+
#[deprecated]
+public fun collect_and_distribute_gas_fees(): bool
 
@@ -1174,8 +1321,8 @@ Lifetime: transient Implementation -
public fun collect_and_distribute_gas_fees(): bool acquires Features {
-    is_enabled(COLLECT_AND_DISTRIBUTE_GAS_FEES)
+
public fun collect_and_distribute_gas_fees(): bool {
+    false
 }
 
@@ -3206,13 +3353,13 @@ Lifetime: transient - + -## Function `get_atomic_bridge_feature` +## Function `get_transaction_simulation_enhancement_feature` -
public fun get_atomic_bridge_feature(): u64
+
public fun get_transaction_simulation_enhancement_feature(): u64
 
@@ -3221,20 +3368,20 @@ Lifetime: transient Implementation -
public fun get_atomic_bridge_feature(): u64 { ATOMIC_BRIDGE }
+
public fun get_transaction_simulation_enhancement_feature(): u64 { TRANSACTION_SIMULATION_ENHANCEMENT }
 
- + -## Function `abort_atomic_bridge_enabled` +## Function `transaction_simulation_enhancement_enabled` -
public fun abort_atomic_bridge_enabled(): bool
+
public fun transaction_simulation_enhancement_enabled(): bool
 
@@ -3243,8 +3390,8 @@ Lifetime: transient Implementation -
public fun abort_atomic_bridge_enabled(): bool acquires Features {
-    is_enabled(ATOMIC_BRIDGE)
+
public fun transaction_simulation_enhancement_enabled(): bool acquires Features {
+    is_enabled(TRANSACTION_SIMULATION_ENHANCEMENT)
 }
 
@@ -3252,13 +3399,13 @@ Lifetime: transient - + -## Function `get_native_bridge_feature` +## Function `get_collection_owner_feature` -
public fun get_native_bridge_feature(): u64
+
public fun get_collection_owner_feature(): u64
 
@@ -3267,20 +3414,20 @@ Lifetime: transient Implementation -
public fun get_native_bridge_feature(): u64 { NATIVE_BRIDGE }
+
public fun get_collection_owner_feature(): u64 { COLLECTION_OWNER }
 
- + -## Function `abort_native_bridge_enabled` +## Function `is_collection_owner_enabled` -
public fun abort_native_bridge_enabled(): bool
+
public fun is_collection_owner_enabled(): bool
 
@@ -3289,8 +3436,8 @@ Lifetime: transient Implementation -
public fun abort_native_bridge_enabled(): bool acquires Features {
-    is_enabled(NATIVE_BRIDGE)
+
public fun is_collection_owner_enabled(): bool acquires Features {
+    is_enabled(COLLECTION_OWNER)
 }
 
@@ -3298,14 +3445,13 @@ Lifetime: transient - + -## Function `get_governed_gas_pool_feature` +## Function `get_native_memory_operations_feature` -Whether the Governed Gas Pool is enabled. -
public fun get_governed_gas_pool_feature(): u64
+
public fun get_native_memory_operations_feature(): u64
 
@@ -3314,20 +3460,20 @@ Whether the Governed Gas Pool is enabled. Implementation -
public fun get_governed_gas_pool_feature(): u64 { GOVERNED_GAS_POOL }
+
public fun get_native_memory_operations_feature(): u64 { NATIVE_MEMORY_OPERATIONS }
 
- + -## Function `governed_gas_pool_enabled` +## Function `is_native_memory_operations_enabled` -
public fun governed_gas_pool_enabled(): bool
+
public fun is_native_memory_operations_enabled(): bool
 
@@ -3336,8 +3482,8 @@ Whether the Governed Gas Pool is enabled. Implementation -
public fun governed_gas_pool_enabled(): bool acquires Features {
-    is_enabled(GOVERNED_GAS_POOL)
+
public fun is_native_memory_operations_enabled(): bool acquires Features {
+    is_enabled(NATIVE_MEMORY_OPERATIONS)
 }
 
@@ -3345,13 +3491,13 @@ Whether the Governed Gas Pool is enabled. - + -## Function `get_decommission_core_resources_feature` +## Function `get_permissioned_signer_feature` -
public fun get_decommission_core_resources_feature(): u64
+
public fun get_permissioned_signer_feature(): u64
 
@@ -3360,20 +3506,20 @@ Whether the Governed Gas Pool is enabled. Implementation -
public fun get_decommission_core_resources_feature(): u64 { DECOMMISSION_CORE_RESOURCES }
+
public fun get_permissioned_signer_feature(): u64 { PERMISSIONED_SIGNER }
 
- + -## Function `get_decommission_core_resources_enabled` +## Function `is_permissioned_signer_enabled` -
public fun get_decommission_core_resources_enabled(): bool
+
public fun is_permissioned_signer_enabled(): bool
 
@@ -3382,8 +3528,8 @@ Whether the Governed Gas Pool is enabled. Implementation -
public fun get_decommission_core_resources_enabled(): bool acquires Features {
-    is_enabled(DECOMMISSION_CORE_RESOURCES)
+
public fun is_permissioned_signer_enabled(): bool acquires Features {
+    is_enabled(PERMISSIONED_SIGNER)
 }
 
@@ -3391,20 +3537,13 @@ Whether the Governed Gas Pool is enabled. - - -## Function `change_feature_flags` - -Deprecated to prevent validator set changes during DKG. - -Genesis/tests should use change_feature_flags_internal() for feature vec initialization. + -This can be used on testnet prior to successful DKG. +## Function `get_account_abstraction_feature` -Governance proposals should use change_feature_flags_for_next_epoch() to enable/disable features. -
public fun change_feature_flags(framework: &signer, enable: vector<u64>, disable: vector<u64>)
+
public fun get_account_abstraction_feature(): u64
 
@@ -3413,23 +3552,20 @@ Governance proposals should use change_feature_flags(framework: &signer, enable: vector<u64>, disable: vector<u64>) acquires Features { - change_feature_flags_internal(framework, enable, disable) -} +
public fun get_account_abstraction_feature(): u64 { ACCOUNT_ABSTRACTION }
 
- + -## Function `change_feature_flags_internal` +## Function `is_account_abstraction_enabled` -Update feature flags directly. Only used in genesis/tests. -
fun change_feature_flags_internal(framework: &signer, enable: vector<u64>, disable: vector<u64>)
+
public fun is_account_abstraction_enabled(): bool
 
@@ -3438,18 +3574,8 @@ Update feature flags directly. Only used in genesis/tests. Implementation -
fun change_feature_flags_internal(framework: &signer, enable: vector<u64>, disable: vector<u64>) acquires Features {
-    assert!(signer::address_of(framework) == @std, error::permission_denied(EFRAMEWORK_SIGNER_NEEDED));
-    if (!exists<Features>(@std)) {
-        move_to<Features>(framework, Features { features: vector[] })
-    };
-    let features = &mut borrow_global_mut<Features>(@std).features;
-    vector::for_each_ref(&enable, |feature| {
-        set(features, *feature, true);
-    });
-    vector::for_each_ref(&disable, |feature| {
-        set(features, *feature, false);
-    });
+
public fun is_account_abstraction_enabled(): bool acquires Features {
+    is_enabled(ACCOUNT_ABSTRACTION)
 }
 
@@ -3457,14 +3583,13 @@ Update feature flags directly. Only used in genesis/tests. - + -## Function `change_feature_flags_for_next_epoch` +## Function `get_bulletproofs_batch_feature` -Enable and disable features for the next epoch. -
public fun change_feature_flags_for_next_epoch(framework: &signer, enable: vector<u64>, disable: vector<u64>)
+
public fun get_bulletproofs_batch_feature(): u64
 
@@ -3473,47 +3598,20 @@ Enable and disable features for the next epoch. Implementation -
public fun change_feature_flags_for_next_epoch(
-    framework: &signer,
-    enable: vector<u64>,
-    disable: vector<u64>
-) acquires PendingFeatures, Features {
-    assert!(signer::address_of(framework) == @std, error::permission_denied(EFRAMEWORK_SIGNER_NEEDED));
-
-    // Figure out the baseline feature vec that the diff will be applied to.
-    let new_feature_vec = if (exists<PendingFeatures>(@std)) {
-        // If there is a buffered feature vec, use it as the baseline.
-        let PendingFeatures { features } = move_from<PendingFeatures>(@std);
-        features
-    } else if (exists<Features>(@std)) {
-        // Otherwise, use the currently effective feature flag vec as the baseline, if it exists.
-        borrow_global<Features>(@std).features
-    } else {
-        // Otherwise, use an empty feature vec.
-        vector[]
-    };
-
-    // Apply the diff and save it to the buffer.
-    apply_diff(&mut new_feature_vec, enable, disable);
-    move_to(framework, PendingFeatures { features: new_feature_vec });
-}
+
public fun get_bulletproofs_batch_feature(): u64 { BULLETPROOFS_BATCH_NATIVES }
 
- - -## Function `on_new_epoch` + -Apply all the pending feature flag changes. Should only be used at the end of a reconfiguration with DKG. +## Function `bulletproofs_batch_enabled` -While the scope is public, it can only be usd in system transactions like block_prologue and governance proposals, -who have permission to set the flag that's checked in extract(). -
public fun on_new_epoch(framework: &signer)
+
public fun bulletproofs_batch_enabled(): bool
 
@@ -3522,16 +3620,8 @@ who have permission to set the flag that's checked in extract(). Implementation -
public fun on_new_epoch(framework: &signer) acquires Features, PendingFeatures {
-    ensure_framework_signer(framework);
-    if (exists<PendingFeatures>(@std)) {
-        let PendingFeatures { features } = move_from<PendingFeatures>(@std);
-        if (exists<Features>(@std)) {
-            borrow_global_mut<Features>(@std).features = features;
-        } else {
-            move_to(framework, Features { features })
-        }
-    }
+
public fun bulletproofs_batch_enabled(): bool acquires Features {
+    is_enabled(BULLETPROOFS_BATCH_NATIVES)
 }
 
@@ -3539,15 +3629,13 @@ who have permission to set the flag that's checked in extract(). - + -## Function `is_enabled` +## Function `is_derivable_account_abstraction_enabled` -Check whether the feature is enabled. -
#[view]
-public fun is_enabled(feature: u64): bool
+
public fun is_derivable_account_abstraction_enabled(): bool
 
@@ -3556,9 +3644,8 @@ Check whether the feature is enabled. Implementation -
public fun is_enabled(feature: u64): bool acquires Features {
-    exists<Features>(@std) &&
-        contains(&borrow_global<Features>(@std).features, feature)
+
public fun is_derivable_account_abstraction_enabled(): bool acquires Features {
+    is_enabled(DERIVABLE_ACCOUNT_ABSTRACTION)
 }
 
@@ -3566,14 +3653,14 @@ Check whether the feature is enabled. - + -## Function `set` +## Function `is_domain_account_abstraction_enabled` -Helper to include or exclude a feature flag. -
fun set(features: &mut vector<u8>, feature: u64, include: bool)
+
#[deprecated]
+public fun is_domain_account_abstraction_enabled(): bool
 
@@ -3582,17 +3669,8 @@ Helper to include or exclude a feature flag. Implementation -
fun set(features: &mut vector<u8>, feature: u64, include: bool) {
-    let byte_index = feature / 8;
-    let bit_mask = 1 << ((feature % 8) as u8);
-    while (vector::length(features) <= byte_index) {
-        vector::push_back(features, 0)
-    };
-    let entry = vector::borrow_mut(features, byte_index);
-    if (include)
-        *entry = *entry | bit_mask
-    else
-        *entry = *entry & (0xff ^ bit_mask)
+
public fun is_domain_account_abstraction_enabled(): bool {
+    false
 }
 
@@ -3600,14 +3678,13 @@ Helper to include or exclude a feature flag. - + -## Function `contains` +## Function `get_new_accounts_default_to_fa_store_feature` -Helper to check whether a feature flag is enabled. -
fun contains(features: &vector<u8>, feature: u64): bool
+
public fun get_new_accounts_default_to_fa_store_feature(): u64
 
@@ -3616,24 +3693,20 @@ Helper to check whether a feature flag is enabled. Implementation -
fun contains(features: &vector<u8>, feature: u64): bool {
-    let byte_index = feature / 8;
-    let bit_mask = 1 << ((feature % 8) as u8);
-    byte_index < vector::length(features) && (*vector::borrow(features, byte_index) & bit_mask) != 0
-}
+
public fun get_new_accounts_default_to_fa_store_feature(): u64 { NEW_ACCOUNTS_DEFAULT_TO_FA_STORE }
 
- + -## Function `apply_diff` +## Function `new_accounts_default_to_fa_store_enabled` -
fun apply_diff(features: &mut vector<u8>, enable: vector<u64>, disable: vector<u64>)
+
public fun new_accounts_default_to_fa_store_enabled(): bool
 
@@ -3642,13 +3715,8 @@ Helper to check whether a feature flag is enabled. Implementation -
fun apply_diff(features: &mut vector<u8>, enable: vector<u64>, disable: vector<u64>) {
-    vector::for_each(enable, |feature| {
-        set(features, feature, true);
-    });
-    vector::for_each(disable, |feature| {
-        set(features, feature, false);
-    });
+
public fun new_accounts_default_to_fa_store_enabled(): bool acquires Features {
+    is_enabled(NEW_ACCOUNTS_DEFAULT_TO_FA_STORE)
 }
 
@@ -3656,13 +3724,13 @@ Helper to check whether a feature flag is enabled. - + -## Function `ensure_framework_signer` +## Function `get_default_account_resource_feature` -
fun ensure_framework_signer(account: &signer)
+
public fun get_default_account_resource_feature(): u64
 
@@ -3671,24 +3739,20 @@ Helper to check whether a feature flag is enabled. Implementation -
fun ensure_framework_signer(account: &signer) {
-    let addr = signer::address_of(account);
-    assert!(addr == @std, error::permission_denied(EFRAMEWORK_SIGNER_NEEDED));
-}
+
public fun get_default_account_resource_feature(): u64 { DEFAULT_ACCOUNT_RESOURCE }
 
- + -## Function `change_feature_flags_for_verification` +## Function `is_default_account_resource_enabled` -
#[verify_only]
-public fun change_feature_flags_for_verification(framework: &signer, enable: vector<u64>, disable: vector<u64>)
+
public fun is_default_account_resource_enabled(): bool
 
@@ -3697,12 +3761,8 @@ Helper to check whether a feature flag is enabled. Implementation -
public fun change_feature_flags_for_verification(
-    framework: &signer,
-    enable: vector<u64>,
-    disable: vector<u64>
-) acquires Features {
-    change_feature_flags_internal(framework, enable, disable)
+
public fun is_default_account_resource_enabled(): bool acquires Features {
+    is_enabled(DEFAULT_ACCOUNT_RESOURCE)
 }
 
@@ -3710,59 +3770,531 @@ Helper to check whether a feature flag is enabled. - - -## Specification - + - +## Function `get_jwk_consensus_per_key_mode_feature` -### Resource `Features` -
struct Features has key
+
public fun get_jwk_consensus_per_key_mode_feature(): u64
 
-
-
-features: vector<u8> -
-
+
+Implementation -
-
+
public fun get_jwk_consensus_per_key_mode_feature(): u64 { JWK_CONSENSUS_PER_KEY_MODE }
+
-
pragma bv=b"0";
-
+ + - +## Function `is_jwk_consensus_per_key_mode_enabled` -### Resource `PendingFeatures` -
struct PendingFeatures has key
+
public fun is_jwk_consensus_per_key_mode_enabled(): bool
 
-
-
-features: vector<u8> -
-
+
+Implementation -
-
+
public fun is_jwk_consensus_per_key_mode_enabled(): bool acquires Features {
+    is_enabled(JWK_CONSENSUS_PER_KEY_MODE)
+}
+
+ + + + + + + +## Function `get_orderless_transactions_feature` + + + +
public fun get_orderless_transactions_feature(): u64
+
+ + + +
+Implementation + + +
public fun get_orderless_transactions_feature(): u64 { ORDERLESS_TRANSACTIONS }
+
+ + + +
+ + + +## Function `orderless_transactions_enabled` + + + +
public fun orderless_transactions_enabled(): bool
+
+ + + +
+Implementation + + +
public fun orderless_transactions_enabled(): bool acquires Features {
+    is_enabled(ORDERLESS_TRANSACTIONS)
+}
+
+ + + +
+ + + +## Function `get_calculate_transaction_fee_for_distribution_feature` + + + +
public fun get_calculate_transaction_fee_for_distribution_feature(): u64
+
+ + + +
+Implementation + + +
public fun get_calculate_transaction_fee_for_distribution_feature(): u64 { CALCULATE_TRANSACTION_FEE_FOR_DISTRIBUTION }
+
+ + + +
+ -
pragma bv=b"0";
+## Function `is_calculate_transaction_fee_for_distribution_enabled`
+
+
+
+
public fun is_calculate_transaction_fee_for_distribution_enabled(): bool
+
+ + + +
+Implementation + + +
public fun is_calculate_transaction_fee_for_distribution_enabled(): bool acquires Features {
+    is_enabled(CALCULATE_TRANSACTION_FEE_FOR_DISTRIBUTION)
+}
+
+ + + +
+ + + +## Function `get_distribute_transaction_fee_feature` + + + +
public fun get_distribute_transaction_fee_feature(): u64
+
+ + + +
+Implementation + + +
public fun get_distribute_transaction_fee_feature(): u64 { DISTRIBUTE_TRANSACTION_FEE }
+
+ + + +
+ + + +## Function `is_distribute_transaction_fee_enabled` + + + +
public fun is_distribute_transaction_fee_enabled(): bool
+
+ + + +
+Implementation + + +
public fun is_distribute_transaction_fee_enabled(): bool acquires Features {
+    is_enabled(DISTRIBUTE_TRANSACTION_FEE)
+}
+
+ + + +
+ + + +## Function `change_feature_flags` + +Deprecated to prevent validator set changes during DKG. + +Genesis/tests should use change_feature_flags_internal() for feature vec initialization. + +Governance proposals should use change_feature_flags_for_next_epoch() to enable/disable features. + + +
public fun change_feature_flags(_framework: &signer, _enable: vector<u64>, _disable: vector<u64>)
+
+ + + +
+Implementation + + +
public fun change_feature_flags(_framework: &signer, _enable: vector<u64>, _disable: vector<u64>) {
+    abort (error::invalid_state(EAPI_DISABLED))
+}
+
+ + + +
+ + + +## Function `change_feature_flags_internal` + +Update feature flags directly. Only used in genesis/tests. + + +
fun change_feature_flags_internal(framework: &signer, enable: vector<u64>, disable: vector<u64>)
+
+ + + +
+Implementation + + +
fun change_feature_flags_internal(framework: &signer, enable: vector<u64>, disable: vector<u64>) acquires Features {
+    assert!(signer::address_of(framework) == @std, error::permission_denied(EFRAMEWORK_SIGNER_NEEDED));
+    if (!exists<Features>(@std)) {
+        move_to<Features>(framework, Features { features: vector[] })
+    };
+    let features = &mut Features[@std].features;
+    enable.for_each_ref(|feature| {
+        set(features, *feature, true);
+    });
+    disable.for_each_ref(|feature| {
+        set(features, *feature, false);
+    });
+}
+
+ + + +
+ + + +## Function `change_feature_flags_for_next_epoch` + +Enable and disable features for the next epoch. + + +
public fun change_feature_flags_for_next_epoch(framework: &signer, enable: vector<u64>, disable: vector<u64>)
+
+ + + +
+Implementation + + +
public fun change_feature_flags_for_next_epoch(
+    framework: &signer,
+    enable: vector<u64>,
+    disable: vector<u64>
+) acquires PendingFeatures, Features {
+    assert!(signer::address_of(framework) == @std, error::permission_denied(EFRAMEWORK_SIGNER_NEEDED));
+
+    // Figure out the baseline feature vec that the diff will be applied to.
+    let new_feature_vec = if (exists<PendingFeatures>(@std)) {
+        // If there is a buffered feature vec, use it as the baseline.
+        let PendingFeatures { features } = move_from<PendingFeatures>(@std);
+        features
+    } else if (exists<Features>(@std)) {
+        // Otherwise, use the currently effective feature flag vec as the baseline, if it exists.
+        Features[@std].features
+    } else {
+        // Otherwise, use an empty feature vec.
+        vector[]
+    };
+
+    // Apply the diff and save it to the buffer.
+    apply_diff(&mut new_feature_vec, enable, disable);
+    move_to(framework, PendingFeatures { features: new_feature_vec });
+}
+
+ + + +
+
+ + +## Function `on_new_epoch` + +Apply all the pending feature flag changes. Should only be used at the end of a reconfiguration with DKG. + +While the scope is public, it can only be used in system transactions like block_prologue and governance proposals, +which have permission to set the flag that's checked in extract(). + +
public fun on_new_epoch(framework: &signer)
+
+ + + +
+Implementation + + +
public fun on_new_epoch(framework: &signer) acquires Features, PendingFeatures {
+    ensure_framework_signer(framework);
+    if (exists<PendingFeatures>(@std)) {
+        let PendingFeatures { features } = move_from<PendingFeatures>(@std);
+        if (exists<Features>(@std)) {
+            Features[@std].features = features;
+        } else {
+            move_to(framework, Features { features })
+        }
+    }
+}
+
+ + + +
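Taken together, `change_feature_flags_for_next_epoch` only buffers a diff in `PendingFeatures`, and `on_new_epoch` later promotes that buffer into `Features`. A hypothetical test-style sketch of this two-step flow (editor's illustration, not part of this diff; test setup and module name are assumptions):

#[test_only]
module 0x42::features_example {
    use std::features;

    #[test(framework = @std)]
    fun buffer_then_apply(framework: signer) {
        let feature = features::get_orderless_transactions_feature();
        // Buffer the change; nothing is visible yet.
        features::change_feature_flags_for_next_epoch(&framework, vector[feature], vector[]);
        assert!(!features::orderless_transactions_enabled(), 0);
        // The pending vector is applied only when the epoch actually changes.
        features::on_new_epoch(&framework);
        assert!(features::orderless_transactions_enabled(), 1);
    }
}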
+ + + +## Function `is_enabled` + +Check whether the feature is enabled. + + +
#[view]
+public fun is_enabled(feature: u64): bool
+
+ + + +
+Implementation + + +
public fun is_enabled(feature: u64): bool acquires Features {
+    exists<Features>(@std) &&
+        contains(&Features[@std].features, feature)
+}
+
+ + + +
+ + + +## Function `set` + +Helper to include or exclude a feature flag. + + +
fun set(features: &mut vector<u8>, feature: u64, include: bool)
+
+ + + +
+Implementation + + +
fun set(features: &mut vector<u8>, feature: u64, include: bool) {
+    let byte_index = feature / 8;
+    let bit_mask = 1 << ((feature % 8) as u8);
+    while (features.length() <= byte_index) {
+        features.push_back(0)
+    };
+
+    if (include)
+        features[byte_index] |= bit_mask
+    else
+        features[byte_index] &= (0xff ^ bit_mask)
+}
+
+ + + +
+ + + +## Function `contains` + +Helper to check whether a feature flag is enabled. + + +
fun contains(features: &vector<u8>, feature: u64): bool
+
+ + + +
+Implementation + + +
fun contains(features: &vector<u8>, feature: u64): bool {
+    let byte_index = feature / 8;
+    let bit_mask = 1 << ((feature % 8) as u8);
+    byte_index < features.length() && (features[byte_index] & bit_mask) != 0
+}
+
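`set` and `contains` share one packing scheme: feature `f` lives in byte `f / 8` at bit `f % 8`, so for example feature 78 maps to byte 9 with mask `0x40`. A standalone sketch of the same arithmetic (editor's illustration, not part of this diff; `set`/`contains` themselves are private, so the helper below is a hypothetical stand-in):

#[test_only]
module 0x42::feature_bits_example {
    // Mirrors the byte/bit packing used by the set/contains helpers above.
    fun bit_is_set(bits: &vector<u8>, feature: u64): bool {
        let byte_index = feature / 8;
        let bit_mask = 1 << ((feature % 8) as u8);
        byte_index < bits.length() && (bits[byte_index] & bit_mask) != 0
    }

    #[test]
    fun feature_78_is_byte_9_bit_6() {
        let bits = vector[0u8, 0, 0, 0, 0, 0, 0, 0, 0, 0x40];
        assert!(bit_is_set(&bits, 78), 0);
        assert!(!bit_is_set(&bits, 79), 1);
    }
}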
+ + + +
+ + + +## Function `apply_diff` + + + +
fun apply_diff(features: &mut vector<u8>, enable: vector<u64>, disable: vector<u64>)
+
+ + + +
+Implementation + + +
fun apply_diff(features: &mut vector<u8>, enable: vector<u64>, disable: vector<u64>) {
+    enable.for_each(|feature| {
+        set(features, feature, true);
+    });
+    disable.for_each(|feature| {
+        set(features, feature, false);
+    });
+}
+
+ + + +
+ + + +## Function `ensure_framework_signer` + + + +
fun ensure_framework_signer(account: &signer)
+
+ + + +
+Implementation + + +
fun ensure_framework_signer(account: &signer) {
+    let addr = signer::address_of(account);
+    assert!(addr == @std, error::permission_denied(EFRAMEWORK_SIGNER_NEEDED));
+}
+
+ + + +
+ + + +## Specification + + + + +### Resource `Features` + + +
struct Features has key
+
+ + + +
+
+features: vector<u8> +
+
+ +
+
+ + + +
pragma bv = b"0";
+
+ + + + + +### Resource `PendingFeatures` + + +
struct PendingFeatures has key
+
+ + + +
+
+features: vector<u8> +
+
+ +
+
+ + + +
pragma bv = b"0";
 
@@ -3843,6 +4375,39 @@ Helper to check whether a feature flag is enabled. + + + + +
fun spec_new_accounts_default_to_fa_apt_store_enabled(): bool {
+   spec_is_enabled(NEW_ACCOUNTS_DEFAULT_TO_FA_APT_STORE)
+}
+
+ + + + + + + +
fun spec_new_accounts_default_to_fa_store_enabled(): bool {
+   spec_is_enabled(NEW_ACCOUNTS_DEFAULT_TO_FA_STORE)
+}
+
+ + + + + + + +
fun spec_simulation_enhancement_enabled(): bool {
+   spec_is_enabled(TRANSACTION_SIMULATION_ENHANCEMENT)
+}
+
+ + + ### Function `abort_if_multisig_payload_mismatch_enabled` @@ -3861,6 +4426,24 @@ Helper to check whether a feature flag is enabled. + + +### Function `is_default_account_resource_enabled` + + +
public fun is_default_account_resource_enabled(): bool
+
+ + + + +
pragma opaque;
+aborts_if [abstract] false;
+ensures [abstract] result == spec_is_enabled(DEFAULT_ACCOUNT_RESOURCE);
+
+ + + ### Function `change_feature_flags_internal` @@ -3903,7 +4486,9 @@ Helper to check whether a feature flag is enabled.
fun spec_contains(features: vector<u8>, feature: u64): bool {
-   ((int2bv((((1 as u8) << ((feature % (8 as u64)) as u64)) as u8)) as u8) & features[feature/8] as u8) > (0 as u8)
+   ((int2bv(
+       (((1 as u8) << ((feature % (8 as u64)) as u64)) as u8)
+   ) as u8) & features[feature / 8] as u8) > (0 as u8)
        && (feature / 8) < len(features)
 }
 
@@ -3992,17 +4577,6 @@ Helper to check whether a feature flag is enabled. - - - -
fun spec_collect_and_distribute_gas_fees_enabled(): bool {
-   spec_is_enabled(COLLECT_AND_DISTRIBUTE_GAS_FEES)
-}
-
- - - - @@ -4024,7 +4598,7 @@ Helper to check whether a feature flag is enabled. -
pragma bv=b"0";
+
pragma bv = b"0";
 aborts_if false;
 ensures feature / 8 < len(features);
 ensures include == spec_contains(features, feature);
@@ -4043,7 +4617,7 @@ Helper to check whether a feature flag is enabled.
 
 
 
-
pragma bv=b"0";
+
pragma bv = b"0";
 aborts_if false;
 ensures result == spec_contains(features, feature);
 
diff --git a/aptos-move/framework/move-stdlib/doc/fixed_point32.md b/aptos-move/framework/move-stdlib/doc/fixed_point32.md index ee8010e510ccc..9dcfaad3e8afa 100644 --- a/aptos-move/framework/move-stdlib/doc/fixed_point32.md +++ b/aptos-move/framework/move-stdlib/doc/fixed_point32.md @@ -166,7 +166,7 @@ overflows. let product = unscaled_product >> 32; // Check whether the value is too large. assert!(product <= MAX_U64, EMULTIPLICATION); - (product as u64) + product as u64 }
@@ -291,7 +291,7 @@ adding or subtracting FixedPoint32 values, can be done using the raw values directly. -
public fun get_raw_value(num: fixed_point32::FixedPoint32): u64
+
public fun get_raw_value(self: fixed_point32::FixedPoint32): u64
 
@@ -300,8 +300,8 @@ values directly. Implementation -
public fun get_raw_value(num: FixedPoint32): u64 {
-    num.value
+
public fun get_raw_value(self: FixedPoint32): u64 {
+    self.value
 }
 
@@ -316,7 +316,7 @@ values directly. Returns true if the ratio is zero. -
public fun is_zero(num: fixed_point32::FixedPoint32): bool
+
public fun is_zero(self: fixed_point32::FixedPoint32): bool
 
@@ -325,8 +325,8 @@ Returns true if the ratio is zero. Implementation -
public fun is_zero(num: FixedPoint32): bool {
-    num.value == 0
+
public fun is_zero(self: FixedPoint32): bool {
+    self.value == 0
 }
 
@@ -426,7 +426,7 @@ Create a fixedpoint value from a u64 value. Returns the largest integer less than or equal to a given number. -
public fun floor(num: fixed_point32::FixedPoint32): u64
+
public fun floor(self: fixed_point32::FixedPoint32): u64
 
@@ -435,8 +435,8 @@ Returns the largest integer less than or equal to a given number. Implementation -
public fun floor(num: FixedPoint32): u64 {
-    num.value >> 32
+
public fun floor(self: FixedPoint32): u64 {
+    self.value >> 32
 }
 
@@ -451,7 +451,7 @@ Returns the largest integer less than or equal to a given number. Rounds up the given FixedPoint32 to the next largest integer. -
public fun ceil(num: fixed_point32::FixedPoint32): u64
+
public fun ceil(self: fixed_point32::FixedPoint32): u64
 
@@ -460,9 +460,9 @@ Rounds up the given FixedPoint32 to the next largest integer. Implementation -
public fun ceil(num: FixedPoint32): u64 {
-    let floored_num = floor(num) << 32;
-    if (num.value == floored_num) {
+
public fun ceil(self: FixedPoint32): u64 {
+    let floored_num = self.floor() << 32;
+    if (self.value == floored_num) {
         return floored_num >> 32
     };
     let val = ((floored_num as u128) + (1 << 32));
@@ -481,7 +481,7 @@ Rounds up the given FixedPoint32 to the next largest integer.
 Returns the value of a FixedPoint32 to the nearest integer.
 
 
-
public fun round(num: fixed_point32::FixedPoint32): u64
+
public fun round(self: fixed_point32::FixedPoint32): u64
 
@@ -490,13 +490,13 @@ Returns the value of a FixedPoint32 to the nearest integer. Implementation -
public fun round(num: FixedPoint32): u64 {
-    let floored_num = floor(num) << 32;
+
public fun round(self: FixedPoint32): u64 {
+    let floored_num = self.floor() << 32;
     let boundary = floored_num + ((1 << 32) / 2);
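     // Illustrative note (editor's comment, not part of the diff): for a raw value of
     // 3 * 2^32 + 2^31 the boundary equals the value itself, so the comparison below
     // is false, round() falls through to ceil(), and the result is 4: ties round up.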
-    if (num.value < boundary) {
+    if (self.value < boundary) {
         floored_num >> 32
     } else {
-        ceil(num)
+        self.ceil()
     }
 }
 
@@ -780,7 +780,7 @@ Returns the value of a FixedPoint32 to the nearest integer. ### Function `floor` -
public fun floor(num: fixed_point32::FixedPoint32): u64
+
public fun floor(self: fixed_point32::FixedPoint32): u64
 
@@ -788,7 +788,7 @@ Returns the value of a FixedPoint32 to the nearest integer.
pragma opaque;
 aborts_if false;
-ensures result == spec_floor(num);
+ensures result == spec_floor(self);
 
@@ -797,12 +797,12 @@ Returns the value of a FixedPoint32 to the nearest integer. -
fun spec_floor(val: FixedPoint32): u64 {
-   let fractional = val.value % (1 << 32);
+
fun spec_floor(self: FixedPoint32): u64 {
+   let fractional = self.value % (1 << 32);
    if (fractional == 0) {
-       val.value >> 32
+       self.value >> 32
    } else {
-       (val.value - fractional) >> 32
+       (self.value - fractional) >> 32
    }
 }
 
@@ -814,7 +814,7 @@ Returns the value of a FixedPoint32 to the nearest integer. ### Function `ceil` -
public fun ceil(num: fixed_point32::FixedPoint32): u64
+
public fun ceil(self: fixed_point32::FixedPoint32): u64
 
@@ -823,7 +823,7 @@ Returns the value of a FixedPoint32 to the nearest integer.
pragma verify_duration_estimate = 120;
 pragma opaque;
 aborts_if false;
-ensures result == spec_ceil(num);
+ensures result == spec_ceil(self);
 
@@ -832,13 +832,13 @@ Returns the value of a FixedPoint32 to the nearest integer. -
fun spec_ceil(val: FixedPoint32): u64 {
-   let fractional = val.value % (1 << 32);
+
fun spec_ceil(self: FixedPoint32): u64 {
+   let fractional = self.value % (1 << 32);
    let one = 1 << 32;
    if (fractional == 0) {
-       val.value >> 32
+       self.value >> 32
    } else {
-       (val.value - fractional + one) >> 32
+       (self.value - fractional + one) >> 32
    }
 }
 
@@ -850,7 +850,7 @@ Returns the value of a FixedPoint32 to the nearest integer. ### Function `round` -
public fun round(num: fixed_point32::FixedPoint32): u64
+
public fun round(self: fixed_point32::FixedPoint32): u64
 
@@ -859,7 +859,7 @@ Returns the value of a FixedPoint32 to the nearest integer.
pragma verify_duration_estimate = 120;
 pragma opaque;
 aborts_if false;
-ensures result == spec_round(num);
+ensures result == spec_round(self);
 
@@ -868,14 +868,14 @@ Returns the value of a FixedPoint32 to the nearest integer. -
fun spec_round(val: FixedPoint32): u64 {
-   let fractional = val.value % (1 << 32);
+
fun spec_round(self: FixedPoint32): u64 {
+   let fractional = self.value % (1 << 32);
    let boundary = (1 << 32) / 2;
    let one = 1 << 32;
    if (fractional < boundary) {
-       (val.value - fractional) >> 32
+       (self.value - fractional) >> 32
    } else {
-       (val.value - fractional + one) >> 32
+       (self.value - fractional + one) >> 32
    }
 }
 
diff --git a/aptos-move/framework/move-stdlib/doc/mem.md b/aptos-move/framework/move-stdlib/doc/mem.md new file mode 100644 index 0000000000000..6f5c78f44218e --- /dev/null +++ b/aptos-move/framework/move-stdlib/doc/mem.md @@ -0,0 +1,115 @@ + + + +# Module `0x1::mem` + +Module with methods for safe memory manipulation. + + +- [Function `swap`](#0x1_mem_swap) +- [Function `replace`](#0x1_mem_replace) +- [Specification](#@Specification_0) + - [Function `swap`](#@Specification_0_swap) + - [Function `replace`](#@Specification_0_replace) + + +
+
+ + + + +## Function `swap` + +Swap the contents of the two passed mutable references. + +Move prevents having two mutable references to the same value, +so the left and right references are always distinct. + + +
public(friend) fun swap<T>(left: &mut T, right: &mut T)
+
+ + + +
+Implementation + + +
native friend fun swap<T>(left: &mut T, right: &mut T);
+
+ + + +
+ + + +## Function `replace` + +Replace the value reference points to with the given new value, +and return the value it had before. + + +
public(friend) fun replace<T>(ref: &mut T, new: T): T
+
+ + + +
+Implementation + + +
friend fun replace<T>(ref: &mut T, new: T): T {
+    swap(ref, &mut new);
+    new
+}
+
+ + + +
+ + + +## Specification + + + + +### Function `swap` + + +
public(friend) fun swap<T>(left: &mut T, right: &mut T)
+
+ + + + +
pragma opaque;
+aborts_if false;
+ensures right == old(left);
+ensures left == old(right);
+
+ + + + + +### Function `replace` + + +
public(friend) fun replace<T>(ref: &mut T, new: T): T
+
+ + + + +
pragma opaque;
+aborts_if false;
+ensures result == old(ref);
+ensures ref == new;
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/move-stdlib/doc/option.md b/aptos-move/framework/move-stdlib/doc/option.md index 914a948f5c978..b692bc09d3415 100644 --- a/aptos-move/framework/move-stdlib/doc/option.md +++ b/aptos-move/framework/move-stdlib/doc/option.md @@ -195,7 +195,7 @@ Return an Option containi
public fun from_vec<Element>(vec: vector<Element>): Option<Element> {
-    assert!(vector::length(&vec) <= 1, EOPTION_VEC_TOO_LONG);
+    assert!(vec.length() <= 1, EOPTION_VEC_TOO_LONG);
     Option { vec }
 }
 
@@ -208,10 +208,10 @@ Return an Option containi ## Function `is_none` -Return true if t does not hold a value +Return true if self does not hold a value -
public fun is_none<Element>(t: &option::Option<Element>): bool
+
public fun is_none<Element>(self: &option::Option<Element>): bool
 
@@ -220,8 +220,8 @@ Return true if t does not hold a value Implementation -
public fun is_none<Element>(t: &Option<Element>): bool {
-    vector::is_empty(&t.vec)
+
public fun is_none<Element>(self: &Option<Element>): bool {
+    self.vec.is_empty()
 }
 
@@ -233,10 +233,10 @@ Return true if t does not hold a value ## Function `is_some` -Return true if t holds a value +Return true if self holds a value -
public fun is_some<Element>(t: &option::Option<Element>): bool
+
public fun is_some<Element>(self: &option::Option<Element>): bool
 
@@ -245,8 +245,8 @@ Return true if t holds a value Implementation -
public fun is_some<Element>(t: &Option<Element>): bool {
-    !vector::is_empty(&t.vec)
+
public fun is_some<Element>(self: &Option<Element>): bool {
+    !self.vec.is_empty()
 }
 
@@ -258,11 +258,11 @@ Return true if t holds a value ## Function `contains` -Return true if the value in t is equal to e_ref -Always returns false if t does not hold a value +Return true if the value in self is equal to e_ref +Always returns false if self does not hold a value -
public fun contains<Element>(t: &option::Option<Element>, e_ref: &Element): bool
+
public fun contains<Element>(self: &option::Option<Element>, e_ref: &Element): bool
 
@@ -271,8 +271,8 @@ Always returns false if t does not hold a value Implementation -
public fun contains<Element>(t: &Option<Element>, e_ref: &Element): bool {
-    vector::contains(&t.vec, e_ref)
+
public fun contains<Element>(self: &Option<Element>, e_ref: &Element): bool {
+    self.vec.contains(e_ref)
 }
 
@@ -284,11 +284,11 @@ Always returns false if t does not hold a value ## Function `borrow` -Return an immutable reference to the value inside t -Aborts if t does not hold a value +Return an immutable reference to the value inside self +Aborts if self does not hold a value -
public fun borrow<Element>(t: &option::Option<Element>): &Element
+
public fun borrow<Element>(self: &option::Option<Element>): &Element
 
@@ -297,9 +297,9 @@ Aborts if t does not hold a value Implementation -
public fun borrow<Element>(t: &Option<Element>): &Element {
-    assert!(is_some(t), EOPTION_NOT_SET);
-    vector::borrow(&t.vec, 0)
+
public fun borrow<Element>(self: &Option<Element>): &Element {
+    assert!(self.is_some(), EOPTION_NOT_SET);
+    &self.vec[0]
 }
 
@@ -311,11 +311,11 @@ Aborts if t does not hold a value ## Function `borrow_with_default` -Return a reference to the value inside t if it holds one -Return default_ref if t does not hold a value +Return a reference to the value inside self if it holds one +Return default_ref if self does not hold a value -
public fun borrow_with_default<Element>(t: &option::Option<Element>, default_ref: &Element): &Element
+
public fun borrow_with_default<Element>(self: &option::Option<Element>, default_ref: &Element): &Element
 
@@ -324,10 +324,10 @@ Return default_ref if t does not hold a value Implementation -
public fun borrow_with_default<Element>(t: &Option<Element>, default_ref: &Element): &Element {
-    let vec_ref = &t.vec;
-    if (vector::is_empty(vec_ref)) default_ref
-    else vector::borrow(vec_ref, 0)
+
public fun borrow_with_default<Element>(self: &Option<Element>, default_ref: &Element): &Element {
+    let vec_ref = &self.vec;
+    if (vec_ref.is_empty()) default_ref
+    else &vec_ref[0]
 }
 
@@ -339,11 +339,11 @@ Return default_ref if t does not hold a value ## Function `get_with_default` -Return the value inside t if it holds one -Return default if t does not hold a value +Return the value inside self if it holds one +Return default if self does not hold a value -
public fun get_with_default<Element: copy, drop>(t: &option::Option<Element>, default: Element): Element
+
public fun get_with_default<Element: copy, drop>(self: &option::Option<Element>, default: Element): Element
 
@@ -353,12 +353,12 @@ Return default if t does not hold a value
public fun get_with_default<Element: copy + drop>(
-    t: &Option<Element>,
+    self: &Option<Element>,
     default: Element,
 ): Element {
-    let vec_ref = &t.vec;
-    if (vector::is_empty(vec_ref)) default
-    else *vector::borrow(vec_ref, 0)
+    let vec_ref = &self.vec;
+    if (vec_ref.is_empty()) default
+    else vec_ref[0]
 }
 
@@ -370,11 +370,11 @@ Return default if t does not hold a value ## Function `fill` -Convert the none option t to a some option by adding e. -Aborts if t already holds a value +Convert the none option self to a some option by adding e. +Aborts if self already holds a value -
public fun fill<Element>(t: &mut option::Option<Element>, e: Element)
+
public fun fill<Element>(self: &mut option::Option<Element>, e: Element)
 
@@ -383,9 +383,9 @@ Aborts if t already holds a value Implementation -
public fun fill<Element>(t: &mut Option<Element>, e: Element) {
-    let vec_ref = &mut t.vec;
-    if (vector::is_empty(vec_ref)) vector::push_back(vec_ref, e)
+
public fun fill<Element>(self: &mut Option<Element>, e: Element) {
+    let vec_ref = &mut self.vec;
+    if (vec_ref.is_empty()) vec_ref.push_back(e)
     else abort EOPTION_IS_SET
 }
 
@@ -398,11 +398,11 @@ Aborts if t already holds a value ## Function `extract` -Convert a some option to a none by removing and returning the value stored inside t -Aborts if t does not hold a value +Convert a some option to a none by removing and returning the value stored inside self +Aborts if self does not hold a value -
public fun extract<Element>(t: &mut option::Option<Element>): Element
+
public fun extract<Element>(self: &mut option::Option<Element>): Element
 
@@ -411,9 +411,9 @@ Aborts if t does not hold a value Implementation -
public fun extract<Element>(t: &mut Option<Element>): Element {
-    assert!(is_some(t), EOPTION_NOT_SET);
-    vector::pop_back(&mut t.vec)
+
public fun extract<Element>(self: &mut Option<Element>): Element {
+    assert!(self.is_some(), EOPTION_NOT_SET);
+    self.vec.pop_back()
 }
 
@@ -425,11 +425,11 @@ Aborts if t does not hold a value ## Function `borrow_mut` -Return a mutable reference to the value inside t -Aborts if t does not hold a value +Return a mutable reference to the value inside self +Aborts if self does not hold a value -
public fun borrow_mut<Element>(t: &mut option::Option<Element>): &mut Element
+
public fun borrow_mut<Element>(self: &mut option::Option<Element>): &mut Element
 
@@ -438,9 +438,9 @@ Aborts if t does not hold a value Implementation -
public fun borrow_mut<Element>(t: &mut Option<Element>): &mut Element {
-    assert!(is_some(t), EOPTION_NOT_SET);
-    vector::borrow_mut(&mut t.vec, 0)
+
public fun borrow_mut<Element>(self: &mut Option<Element>): &mut Element {
+    assert!(self.is_some(), EOPTION_NOT_SET);
+    self.vec.borrow_mut(0)
 }
 
@@ -452,11 +452,11 @@ Aborts if t does not hold a value ## Function `swap` -Swap the old value inside t with e and return the old value -Aborts if t does not hold a value +Swap the old value inside self with e and return the old value +Aborts if self does not hold a value -
public fun swap<Element>(t: &mut option::Option<Element>, e: Element): Element
+
public fun swap<Element>(self: &mut option::Option<Element>, e: Element): Element
 
@@ -465,11 +465,11 @@ Aborts if t does not hold a value Implementation -
public fun swap<Element>(t: &mut Option<Element>, e: Element): Element {
-    assert!(is_some(t), EOPTION_NOT_SET);
-    let vec_ref = &mut t.vec;
-    let old_value = vector::pop_back(vec_ref);
-    vector::push_back(vec_ref, e);
+
public fun swap<Element>(self: &mut Option<Element>, e: Element): Element {
+    assert!(self.is_some(), EOPTION_NOT_SET);
+    let vec_ref = &mut self.vec;
+    let old_value = vec_ref.pop_back();
+    vec_ref.push_back(e);
     old_value
 }
 
@@ -482,12 +482,12 @@ Aborts if t does not hold a value ## Function `swap_or_fill` -Swap the old value inside t with e and return the old value; +Swap the old value inside self with e and return the old value; or if there is no old value, fill it with e. -Different from swap(), swap_or_fill() allows for t not holding a value. +Different from swap(), swap_or_fill() allows for self not holding a value. -
public fun swap_or_fill<Element>(t: &mut option::Option<Element>, e: Element): option::Option<Element>
+
public fun swap_or_fill<Element>(self: &mut option::Option<Element>, e: Element): option::Option<Element>
 
@@ -496,11 +496,11 @@ Different from swap(), swap_or_fill() allows for t not holding a va Implementation -
public fun swap_or_fill<Element>(t: &mut Option<Element>, e: Element): Option<Element> {
-    let vec_ref = &mut t.vec;
-    let old_value = if (vector::is_empty(vec_ref)) none()
-        else some(vector::pop_back(vec_ref));
-    vector::push_back(vec_ref, e);
+
public fun swap_or_fill<Element>(self: &mut Option<Element>, e: Element): Option<Element> {
+    let vec_ref = &mut self.vec;
+    let old_value = if (vec_ref.is_empty()) none()
+        else some(vec_ref.pop_back());
+    vec_ref.push_back(e);
     old_value
 }
 
@@ -513,10 +513,10 @@ Different from swap(), swap_or_fill() allows for t not holding a va ## Function `destroy_with_default` -Destroys t. If t holds a value, return it. Returns default otherwise +Destroys self. If self holds a value, return it. Returns default otherwise -
public fun destroy_with_default<Element: drop>(t: option::Option<Element>, default: Element): Element
+
public fun destroy_with_default<Element: drop>(self: option::Option<Element>, default: Element): Element
 
@@ -525,10 +525,10 @@ Destroys t. If t holds a value, return it. Returns Implementation -
public fun destroy_with_default<Element: drop>(t: Option<Element>, default: Element): Element {
-    let Option { vec } = t;
-    if (vector::is_empty(&mut vec)) default
-    else vector::pop_back(&mut vec)
+
public fun destroy_with_default<Element: drop>(self: Option<Element>, default: Element): Element {
+    let Option { vec } = self;
+    if (vec.is_empty()) default
+    else vec.pop_back()
 }
 
@@ -540,11 +540,11 @@ Destroys t. If t holds a value, return it. Returns t
and return its contents -Aborts if t does not hold a value +Unpack self and return its contents +Aborts if self does not hold a value -
public fun destroy_some<Element>(t: option::Option<Element>): Element
+
public fun destroy_some<Element>(self: option::Option<Element>): Element
 
@@ -553,11 +553,11 @@ Aborts if t does not hold a value Implementation -
public fun destroy_some<Element>(t: Option<Element>): Element {
-    assert!(is_some(&t), EOPTION_NOT_SET);
-    let Option { vec } = t;
-    let elem = vector::pop_back(&mut vec);
-    vector::destroy_empty(vec);
+
public fun destroy_some<Element>(self: Option<Element>): Element {
+    assert!(self.is_some(), EOPTION_NOT_SET);
+    let Option { vec } = self;
+    let elem = vec.pop_back();
+    vec.destroy_empty();
     elem
 }
 
@@ -570,11 +570,11 @@ Aborts if t does not hold a value ## Function `destroy_none` -Unpack t -Aborts if t holds a value +Unpack self +Aborts if self holds a value -
public fun destroy_none<Element>(t: option::Option<Element>)
+
public fun destroy_none<Element>(self: option::Option<Element>)
 
@@ -583,10 +583,10 @@ Aborts if t holds a value Implementation -
public fun destroy_none<Element>(t: Option<Element>) {
-    assert!(is_none(&t), EOPTION_IS_SET);
-    let Option { vec } = t;
-    vector::destroy_empty(vec)
+
public fun destroy_none<Element>(self: Option<Element>) {
+    assert!(self.is_none(), EOPTION_IS_SET);
+    let Option { vec } = self;
+    vec.destroy_empty()
 }
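
A small sketch of the two unpacking paths, assuming the receiver-call syntax shown above; names and values are illustrative.

```move
#[test_only]
module 0x42::option_unpack_examples {
    use std::option;

    #[test]
    fun unpack_both_ways() {
        // destroy_some aborts with EOPTION_NOT_SET if the option is empty.
        let value = option::some(42).destroy_some();
        assert!(value == 42, 0);
        // destroy_none aborts with EOPTION_IS_SET if a value is present.
        option::none<u64>().destroy_none();
    }
}
```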
 
@@ -598,11 +598,11 @@ Aborts if t holds a value ## Function `to_vec` -Convert t into a vector of length 1 if it is Some, +Convert self into a vector of length 1 if it is Some, and an empty vector otherwise -
public fun to_vec<Element>(t: option::Option<Element>): vector<Element>
+
public fun to_vec<Element>(self: option::Option<Element>): vector<Element>
 
@@ -611,8 +611,8 @@ and an empty vector otherwise Implementation -
public fun to_vec<Element>(t: Option<Element>): vector<Element> {
-    let Option { vec } = t;
+
public fun to_vec<Element>(self: Option<Element>): vector<Element> {
+    let Option { vec } = self;
     vec
 }
 
@@ -628,7 +628,7 @@ and an empty vector otherwise Apply the function to the optional element, consuming it. Does nothing if no value present. -
public fun for_each<Element>(o: option::Option<Element>, f: |Element|)
+
public fun for_each<Element>(self: option::Option<Element>, f: |Element|)
 
@@ -637,11 +637,11 @@ Apply the function to the optional element, consuming it. Does nothing if no val Implementation -
public inline fun for_each<Element>(o: Option<Element>, f: |Element|) {
-    if (is_some(&o)) {
-        f(destroy_some(o))
+
public inline fun for_each<Element>(self: Option<Element>, f: |Element|) {
+    if (self.is_some()) {
+        f(self.destroy_some())
     } else {
-        destroy_none(o)
+        self.destroy_none()
     }
 }
 
@@ -657,7 +657,7 @@ Apply the function to the optional element, consuming it. Does nothing if no val Apply the function to the optional element reference. Does nothing if no value present. -
public fun for_each_ref<Element>(o: &option::Option<Element>, f: |&Element|)
+
public fun for_each_ref<Element>(self: &option::Option<Element>, f: |&Element|)
 
@@ -666,9 +666,9 @@ Apply the function to the optional element reference. Does nothing if no value p Implementation -
public inline fun for_each_ref<Element>(o: &Option<Element>, f: |&Element|) {
-    if (is_some(o)) {
-        f(borrow(o))
+
public inline fun for_each_ref<Element>(self: &Option<Element>, f: |&Element|) {
+    if (self.is_some()) {
+        f(self.borrow())
     }
 }
 
@@ -684,7 +684,7 @@ Apply the function to the optional element reference. Does nothing if no value p Apply the function to the optional element reference. Does nothing if no value present. -
public fun for_each_mut<Element>(o: &mut option::Option<Element>, f: |&mut Element|)
+
public fun for_each_mut<Element>(self: &mut option::Option<Element>, f: |&mut Element|)
 
@@ -693,9 +693,9 @@ Apply the function to the optional element reference. Does nothing if no value p Implementation -
public inline fun for_each_mut<Element>(o: &mut Option<Element>, f: |&mut Element|) {
-    if (is_some(o)) {
-        f(borrow_mut(o))
+
public inline fun for_each_mut<Element>(self: &mut Option<Element>, f: |&mut Element|) {
+    if (self.is_some()) {
+        f(self.borrow_mut())
     }
 }
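
The three for_each variants differ only in how they hand over the element (by value, by reference, by mutable reference); a rough sketch with illustrative values:

```move
#[test_only]
module 0x42::option_for_each_examples {
    use std::option;

    #[test]
    fun visit_then_consume() {
        let o = option::some(10);
        // Mutate the contained value in place.
        o.for_each_mut(|x| *x = *x + 1);
        // Read it without consuming the option.
        let seen = 0;
        o.for_each_ref(|x| seen = *x);
        assert!(seen == 11, 0);
        // Finally consume the option itself.
        o.for_each(|x| assert!(x == 11, 1));
    }
}
```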
 
@@ -711,7 +711,7 @@ Apply the function to the optional element reference. Does nothing if no value p Folds the function over the optional element. -
public fun fold<Accumulator, Element>(o: option::Option<Element>, init: Accumulator, f: |(Accumulator, Element)|Accumulator): Accumulator
+
public fun fold<Accumulator, Element>(self: option::Option<Element>, init: Accumulator, f: |(Accumulator, Element)|Accumulator): Accumulator
 
@@ -721,14 +721,14 @@ Folds the function over the optional element.
public inline fun fold<Accumulator, Element>(
-    o: Option<Element>,
+    self: Option<Element>,
     init: Accumulator,
     f: |Accumulator,Element|Accumulator
 ): Accumulator {
-    if (is_some(&o)) {
-        f(init, destroy_some(o))
+    if (self.is_some()) {
+        f(init, self.destroy_some())
     } else {
-        destroy_none(o);
+        self.destroy_none();
         init
     }
 }
@@ -745,7 +745,7 @@ Folds the function over the optional element.
 Maps the content of an option.
 
 
-
public fun map<Element, OtherElement>(o: option::Option<Element>, f: |Element|OtherElement): option::Option<OtherElement>
+
public fun map<Element, OtherElement>(self: option::Option<Element>, f: |Element|OtherElement): option::Option<OtherElement>
 
@@ -754,11 +754,11 @@ Maps the content of an option. Implementation -
public inline fun map<Element, OtherElement>(o: Option<Element>, f: |Element|OtherElement): Option<OtherElement> {
-    if (is_some(&o)) {
-        some(f(destroy_some(o)))
+
public inline fun map<Element, OtherElement>(self: Option<Element>, f: |Element|OtherElement): Option<OtherElement> {
+    if (self.is_some()) {
+        some(f(self.destroy_some()))
     } else {
-        destroy_none(o);
+        self.destroy_none();
         none()
     }
 }
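
map and fold consume the option, mirroring their vector counterparts; a brief illustrative sketch:

```move
#[test_only]
module 0x42::option_map_examples {
    use std::option;

    #[test]
    fun map_and_fold() {
        let doubled = option::some(5).map(|x| x * 2);
        assert!(doubled == option::some(10), 0);
        // fold starts from the initial accumulator and applies f once if a value exists.
        let total = option::some(5).fold(100, |acc, e| acc + e);
        assert!(total == 105, 1);
    }
}
```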
@@ -775,7 +775,7 @@ Maps the content of an option.
 Maps the content of an option without destroying the original option.
 
 
-
public fun map_ref<Element, OtherElement>(o: &option::Option<Element>, f: |&Element|OtherElement): option::Option<OtherElement>
+
public fun map_ref<Element, OtherElement>(self: &option::Option<Element>, f: |&Element|OtherElement): option::Option<OtherElement>
 
@@ -785,9 +785,9 @@ Maps the content of an option without destroying the original option.
public inline fun map_ref<Element, OtherElement>(
-    o: &Option<Element>, f: |&Element|OtherElement): Option<OtherElement> {
-    if (is_some(o)) {
-        some(f(borrow(o)))
+    self: &Option<Element>, f: |&Element|OtherElement): Option<OtherElement> {
+    if (self.is_some()) {
+        some(f(self.borrow()))
     } else {
         none()
     }
@@ -805,7 +805,7 @@ Maps the content of an option without destroying the original option.
 Filters the content of an option
 
 
-
public fun filter<Element: drop>(o: option::Option<Element>, f: |&Element|bool): option::Option<Element>
+
public fun filter<Element: drop>(self: option::Option<Element>, f: |&Element|bool): option::Option<Element>
 
@@ -814,9 +814,9 @@ Filters the content of an option Implementation -
public inline fun filter<Element:drop>(o: Option<Element>, f: |&Element|bool): Option<Element> {
-    if (is_some(&o) && f(borrow(&o))) {
-        o
+
public inline fun filter<Element:drop>(self: Option<Element>, f: |&Element|bool): Option<Element> {
+    if (self.is_some() && f(self.borrow())) {
+        self
     } else {
         none()
     }
@@ -834,7 +834,7 @@ Filters the content of an option
 Returns true if the option contains an element which satisfies predicate.
 
 
-
public fun any<Element>(o: &option::Option<Element>, p: |&Element|bool): bool
+
public fun any<Element>(self: &option::Option<Element>, p: |&Element|bool): bool
 
@@ -843,8 +843,8 @@ Returns true if the option contains an element which satisfies predicate. Implementation -
public inline fun any<Element>(o: &Option<Element>, p: |&Element|bool): bool {
-    is_some(o) && p(borrow(o))
+
public inline fun any<Element>(self: &Option<Element>, p: |&Element|bool): bool {
+    self.is_some() && p(self.borrow())
 }
 
@@ -859,7 +859,7 @@ Returns true if the option contains an element which satisfies predicate. Utility function to destroy an option that is not droppable. -
public fun destroy<Element>(o: option::Option<Element>, d: |Element|)
+
public fun destroy<Element>(self: option::Option<Element>, d: |Element|)
 
@@ -868,9 +868,9 @@ Utility function to destroy an option that is not droppable. Implementation -
public inline fun destroy<Element>(o: Option<Element>, d: |Element|) {
-    let vec = to_vec(o);
-    vector::destroy(vec, |e| d(e));
+
public inline fun destroy<Element>(self: Option<Element>, d: |Element|) {
+    let vec = self.to_vec();
+    vec.destroy(|e| d(e));
 }
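
destroy exists for element types without drop, where the closure has to dispose of the value explicitly. A sketch with a made-up non-droppable type:

```move
#[test_only]
module 0x42::option_destroy_examples {
    use std::option;

    // No `drop` ability, so the value cannot be silently discarded.
    struct Receipt { amount: u64 }

    #[test]
    fun consume_non_droppable() {
        let o = option::some(Receipt { amount: 3 });
        o.destroy(|r| {
            let Receipt { amount } = r;
            assert!(amount == 3, 0);
        });
    }
}
```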
 
@@ -900,8 +900,8 @@ Utility function to destroy an option that is not droppable.
schema AbortsIfNone<Element> {
-    t: Option<Element>;
-    aborts_if spec_is_none(t) with EOPTION_NOT_SET;
+    self: Option<Element>;
+    aborts_if spec_is_none(self) with EOPTION_NOT_SET;
 }
 
@@ -959,7 +959,7 @@ because it's 0 for "none" or 1 for "some".
fun spec_none<Element>(): Option<Element> {
-   Option{ vec: vec() }
+   Option{ vec: vector[] }
 }
 
@@ -1005,7 +1005,7 @@ because it's 0 for "none" or 1 for "some". -
aborts_if vector::length(vec) > 1;
+
aborts_if vec.length() > 1;
 
@@ -1015,7 +1015,7 @@ because it's 0 for "none" or 1 for "some". ### Function `is_none` -
public fun is_none<Element>(t: &option::Option<Element>): bool
+
public fun is_none<Element>(self: &option::Option<Element>): bool
 
@@ -1023,7 +1023,7 @@ because it's 0 for "none" or 1 for "some".
pragma opaque;
 aborts_if false;
-ensures result == spec_is_none(t);
+ensures result == spec_is_none(self);
 
@@ -1032,8 +1032,8 @@ because it's 0 for "none" or 1 for "some". -
fun spec_is_none<Element>(t: Option<Element>): bool {
-   vector::is_empty(t.vec)
+
fun spec_is_none<Element>(self: Option<Element>): bool {
+   self.vec.is_empty()
 }
 
@@ -1044,7 +1044,7 @@ because it's 0 for "none" or 1 for "some". ### Function `is_some` -
public fun is_some<Element>(t: &option::Option<Element>): bool
+
public fun is_some<Element>(self: &option::Option<Element>): bool
 
@@ -1052,7 +1052,7 @@ because it's 0 for "none" or 1 for "some".
pragma opaque;
 aborts_if false;
-ensures result == spec_is_some(t);
+ensures result == spec_is_some(self);
 
@@ -1061,8 +1061,8 @@ because it's 0 for "none" or 1 for "some". -
fun spec_is_some<Element>(t: Option<Element>): bool {
-   !vector::is_empty(t.vec)
+
fun spec_is_some<Element>(self: Option<Element>): bool {
+   !self.vec.is_empty()
 }
 
@@ -1073,7 +1073,7 @@ because it's 0 for "none" or 1 for "some". ### Function `contains` -
public fun contains<Element>(t: &option::Option<Element>, e_ref: &Element): bool
+
public fun contains<Element>(self: &option::Option<Element>, e_ref: &Element): bool
 
@@ -1081,7 +1081,7 @@ because it's 0 for "none" or 1 for "some".
pragma opaque;
 aborts_if false;
-ensures result == spec_contains(t, e_ref);
+ensures result == spec_contains(self, e_ref);
 
@@ -1090,8 +1090,8 @@ because it's 0 for "none" or 1 for "some". -
fun spec_contains<Element>(t: Option<Element>, e: Element): bool {
-   is_some(t) && borrow(t) == e
+
fun spec_contains<Element>(self: Option<Element>, e: Element): bool {
+   self.is_some() && self.borrow() == e
 }
 
@@ -1102,7 +1102,7 @@ because it's 0 for "none" or 1 for "some". ### Function `borrow` -
public fun borrow<Element>(t: &option::Option<Element>): &Element
+
public fun borrow<Element>(self: &option::Option<Element>): &Element
 
@@ -1110,7 +1110,7 @@ because it's 0 for "none" or 1 for "some".
pragma opaque;
 include AbortsIfNone<Element>;
-ensures result == spec_borrow(t);
+ensures result == spec_borrow(self);
 
@@ -1119,8 +1119,8 @@ because it's 0 for "none" or 1 for "some". -
fun spec_borrow<Element>(t: Option<Element>): Element {
-   t.vec[0]
+
fun spec_borrow<Element>(self: Option<Element>): Element {
+   self.vec[0]
 }
 
@@ -1131,7 +1131,7 @@ because it's 0 for "none" or 1 for "some". ### Function `borrow_with_default` -
public fun borrow_with_default<Element>(t: &option::Option<Element>, default_ref: &Element): &Element
+
public fun borrow_with_default<Element>(self: &option::Option<Element>, default_ref: &Element): &Element
 
@@ -1139,7 +1139,7 @@ because it's 0 for "none" or 1 for "some".
pragma opaque;
 aborts_if false;
-ensures result == (if (spec_is_some(t)) spec_borrow(t) else default_ref);
+ensures result == (if (spec_is_some(self)) spec_borrow(self) else default_ref);
 
@@ -1149,7 +1149,7 @@ because it's 0 for "none" or 1 for "some". ### Function `get_with_default` -
public fun get_with_default<Element: copy, drop>(t: &option::Option<Element>, default: Element): Element
+
public fun get_with_default<Element: copy, drop>(self: &option::Option<Element>, default: Element): Element
 
@@ -1157,7 +1157,7 @@ because it's 0 for "none" or 1 for "some".
pragma opaque;
 aborts_if false;
-ensures result == (if (spec_is_some(t)) spec_borrow(t) else default);
+ensures result == (if (spec_is_some(self)) spec_borrow(self) else default);
 
@@ -1167,16 +1167,16 @@ because it's 0 for "none" or 1 for "some". ### Function `fill` -
public fun fill<Element>(t: &mut option::Option<Element>, e: Element)
+
public fun fill<Element>(self: &mut option::Option<Element>, e: Element)
 
pragma opaque;
-aborts_if spec_is_some(t) with EOPTION_IS_SET;
-ensures spec_is_some(t);
-ensures spec_borrow(t) == e;
+aborts_if spec_is_some(self) with EOPTION_IS_SET;
+ensures spec_is_some(self);
+ensures spec_borrow(self) == e;
 
@@ -1186,7 +1186,7 @@ because it's 0 for "none" or 1 for "some". ### Function `extract` -
public fun extract<Element>(t: &mut option::Option<Element>): Element
+
public fun extract<Element>(self: &mut option::Option<Element>): Element
 
@@ -1194,8 +1194,8 @@ because it's 0 for "none" or 1 for "some".
pragma opaque;
 include AbortsIfNone<Element>;
-ensures result == spec_borrow(old(t));
-ensures spec_is_none(t);
+ensures result == spec_borrow(old(self));
+ensures spec_is_none(self);
 
@@ -1205,15 +1205,15 @@ because it's 0 for "none" or 1 for "some". ### Function `borrow_mut` -
public fun borrow_mut<Element>(t: &mut option::Option<Element>): &mut Element
+
public fun borrow_mut<Element>(self: &mut option::Option<Element>): &mut Element
 
include AbortsIfNone<Element>;
-ensures result == spec_borrow(t);
-ensures t == old(t);
+ensures result == spec_borrow(self);
+ensures self == old(self);
 
@@ -1223,7 +1223,7 @@ because it's 0 for "none" or 1 for "some". ### Function `swap` -
public fun swap<Element>(t: &mut option::Option<Element>, e: Element): Element
+
public fun swap<Element>(self: &mut option::Option<Element>, e: Element): Element
 
@@ -1231,9 +1231,9 @@ because it's 0 for "none" or 1 for "some".
pragma opaque;
 include AbortsIfNone<Element>;
-ensures result == spec_borrow(old(t));
-ensures spec_is_some(t);
-ensures spec_borrow(t) == e;
+ensures result == spec_borrow(old(self));
+ensures spec_is_some(self);
+ensures spec_borrow(self) == e;
 
@@ -1243,7 +1243,7 @@ because it's 0 for "none" or 1 for "some". ### Function `swap_or_fill` -
public fun swap_or_fill<Element>(t: &mut option::Option<Element>, e: Element): option::Option<Element>
+
public fun swap_or_fill<Element>(self: &mut option::Option<Element>, e: Element): option::Option<Element>
 
@@ -1251,8 +1251,8 @@ because it's 0 for "none" or 1 for "some".
pragma opaque;
 aborts_if false;
-ensures result == old(t);
-ensures spec_borrow(t) == e;
+ensures result == old(self);
+ensures spec_borrow(self) == e;
 
@@ -1262,7 +1262,7 @@ because it's 0 for "none" or 1 for "some". ### Function `destroy_with_default` -
public fun destroy_with_default<Element: drop>(t: option::Option<Element>, default: Element): Element
+
public fun destroy_with_default<Element: drop>(self: option::Option<Element>, default: Element): Element
 
@@ -1270,7 +1270,7 @@ because it's 0 for "none" or 1 for "some".
pragma opaque;
 aborts_if false;
-ensures result == (if (spec_is_some(t)) spec_borrow(t) else default);
+ensures result == (if (spec_is_some(self)) spec_borrow(self) else default);
 
@@ -1280,7 +1280,7 @@ because it's 0 for "none" or 1 for "some". ### Function `destroy_some` -
public fun destroy_some<Element>(t: option::Option<Element>): Element
+
public fun destroy_some<Element>(self: option::Option<Element>): Element
 
@@ -1288,7 +1288,7 @@ because it's 0 for "none" or 1 for "some".
pragma opaque;
 include AbortsIfNone<Element>;
-ensures result == spec_borrow(t);
+ensures result == spec_borrow(self);
 
@@ -1298,14 +1298,14 @@ because it's 0 for "none" or 1 for "some". ### Function `destroy_none` -
public fun destroy_none<Element>(t: option::Option<Element>)
+
public fun destroy_none<Element>(self: option::Option<Element>)
 
pragma opaque;
-aborts_if spec_is_some(t) with EOPTION_IS_SET;
+aborts_if spec_is_some(self) with EOPTION_IS_SET;
 
@@ -1315,7 +1315,7 @@ because it's 0 for "none" or 1 for "some". ### Function `to_vec` -
public fun to_vec<Element>(t: option::Option<Element>): vector<Element>
+
public fun to_vec<Element>(self: option::Option<Element>): vector<Element>
 
@@ -1323,7 +1323,7 @@ because it's 0 for "none" or 1 for "some".
pragma opaque;
 aborts_if false;
-ensures result == t.vec;
+ensures result == self.vec;
 
diff --git a/aptos-move/framework/move-stdlib/doc/overview.md b/aptos-move/framework/move-stdlib/doc/overview.md
index 8eb0c67f05113..3c93f83875d10 100644
--- a/aptos-move/framework/move-stdlib/doc/overview.md
+++ b/aptos-move/framework/move-stdlib/doc/overview.md
@@ -16,10 +16,12 @@ For an overview of the Move language, see the [Move Book][move-book].
 - [`0x1::acl`](acl.md#0x1_acl)
 - [`0x1::bcs`](bcs.md#0x1_bcs)
 - [`0x1::bit_vector`](bit_vector.md#0x1_bit_vector)
+- [`0x1::cmp`](cmp.md#0x1_cmp)
 - [`0x1::error`](error.md#0x1_error)
 - [`0x1::features`](features.md#0x1_features)
 - [`0x1::fixed_point32`](fixed_point32.md#0x1_fixed_point32)
 - [`0x1::hash`](hash.md#0x1_hash)
+- [`0x1::mem`](mem.md#0x1_mem)
 - [`0x1::option`](option.md#0x1_option)
 - [`0x1::signer`](signer.md#0x1_signer)
 - [`0x1::string`](string.md#0x1_string)
diff --git a/aptos-move/framework/move-stdlib/doc/signer.md b/aptos-move/framework/move-stdlib/doc/signer.md
index f6de8799b571c..13e9e008ef8d3 100644
--- a/aptos-move/framework/move-stdlib/doc/signer.md
+++ b/aptos-move/framework/move-stdlib/doc/signer.md
@@ -18,12 +18,26 @@
 ## Function `borrow_address`
 
-Borrows the address of the signer
-Conceptually, you can think of the signer as being a struct wrapper around an
-address
+signer is a builtin Move type that represents an address that has been verified by the VM.
+
+The VM runtime representation is equivalent to the following:
+```
+enum signer has drop {
+    Master { account: address },
+    Permissioned { account: address, permissions_address: address },
+}
+```
+
+For BCS serialization:
+
 ```
-struct signer has drop { addr: address }
+struct signer has drop {
+    account: address,
+}
 ```
+The discrepancy is needed to maintain backwards compatibility of signer serialization
+semantics.
+
 borrow_address borrows this inner field
diff --git a/aptos-move/framework/move-stdlib/doc/string.md b/aptos-move/framework/move-stdlib/doc/string.md
index b45c55afe9902..3a35620bbe1cf 100644
--- a/aptos-move/framework/move-stdlib/doc/string.md
+++ b/aptos-move/framework/move-stdlib/doc/string.md
@@ -150,7 +150,7 @@ Tries to create a new string from a sequence of bytes.
 Returns a reference to the underlying byte vector.
 
-
public fun bytes(s: &string::String): &vector<u8>
+
public fun bytes(self: &string::String): &vector<u8>
 
@@ -159,8 +159,8 @@ Returns a reference to the underlying byte vector. Implementation -
public fun bytes(s: &String): &vector<u8> {
-    &s.bytes
+
public fun bytes(self: &String): &vector<u8> {
+    &self.bytes
 }
 
@@ -175,7 +175,7 @@ Returns a reference to the underlying byte vector. Checks whether this string is empty. -
public fun is_empty(s: &string::String): bool
+
public fun is_empty(self: &string::String): bool
 
@@ -184,8 +184,8 @@ Checks whether this string is empty. Implementation -
public fun is_empty(s: &String): bool {
-    vector::is_empty(&s.bytes)
+
public fun is_empty(self: &String): bool {
+    self.bytes.is_empty()
 }
 
@@ -200,7 +200,7 @@ Checks whether this string is empty. Returns the length of this string, in bytes. -
public fun length(s: &string::String): u64
+
public fun length(self: &string::String): u64
 
@@ -209,8 +209,8 @@ Returns the length of this string, in bytes. Implementation -
public fun length(s: &String): u64 {
-    vector::length(&s.bytes)
+
public fun length(self: &String): u64 {
+    self.bytes.length()
 }
 
@@ -225,7 +225,7 @@ Returns the length of this string, in bytes. Appends a string. -
public fun append(s: &mut string::String, r: string::String)
+
public fun append(self: &mut string::String, r: string::String)
 
@@ -234,8 +234,8 @@ Appends a string. Implementation -
public fun append(s: &mut String, r: String) {
-    vector::append(&mut s.bytes, r.bytes)
+
public fun append(self: &mut String, r: String) {
+    self.bytes.append(r.bytes)
 }
 
@@ -250,7 +250,7 @@ Appends a string. Appends bytes which must be in valid utf8 format. -
public fun append_utf8(s: &mut string::String, bytes: vector<u8>)
+
public fun append_utf8(self: &mut string::String, bytes: vector<u8>)
 
@@ -259,8 +259,8 @@ Appends bytes which must be in valid utf8 format. Implementation -
public fun append_utf8(s: &mut String, bytes: vector<u8>) {
-    append(s, utf8(bytes))
+
public fun append_utf8(self: &mut String, bytes: vector<u8>) {
+    self.append(utf8(bytes))
 }
 
@@ -276,7 +276,7 @@ Insert the other string at the byte index in given string. The index must be at boundary. -
public fun insert(s: &mut string::String, at: u64, o: string::String)
+
public fun insert(self: &mut string::String, at: u64, o: string::String)
 
@@ -285,15 +285,15 @@ boundary. Implementation -
public fun insert(s: &mut String, at: u64, o: String) {
-    let bytes = &s.bytes;
-    assert!(at <= vector::length(bytes) && internal_is_char_boundary(bytes, at), EINVALID_INDEX);
-    let l = length(s);
-    let front = sub_string(s, 0, at);
-    let end = sub_string(s, at, l);
-    append(&mut front, o);
-    append(&mut front, end);
-    *s = front;
+
public fun insert(self: &mut String, at: u64, o: String) {
+    let bytes = &self.bytes;
+    assert!(at <= bytes.length() && internal_is_char_boundary(bytes, at), EINVALID_INDEX);
+    let l = self.length();
+    let front = self.sub_string(0, at);
+    let end = self.sub_string(at, l);
+    front.append(o);
+    front.append(end);
+    *self = front;
 }
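
A short sketch of insert and sub_string; both abort with EINVALID_INDEX when an index does not fall on a UTF-8 character boundary. The values are illustrative only.

```move
#[test_only]
module 0x42::string_examples {
    use std::string;

    #[test]
    fun splice_and_slice() {
        let s = string::utf8(b"hello world");
        // Insert at byte index 5, which is a character boundary here.
        s.insert(5, string::utf8(b","));
        assert!(*s.bytes() == b"hello, world", 0);
        assert!(s.sub_string(0, 5) == string::utf8(b"hello"), 1);
    }
}
```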
 
@@ -310,7 +310,7 @@ of the first byte not included (or the length of the string). The indices must b guaranteeing that the result is valid utf8. -
public fun sub_string(s: &string::String, i: u64, j: u64): string::String
+
public fun sub_string(self: &string::String, i: u64, j: u64): string::String
 
@@ -319,9 +319,9 @@ guaranteeing that the result is valid utf8. Implementation -
public fun sub_string(s: &String, i: u64, j: u64): String {
-    let bytes = &s.bytes;
-    let l = vector::length(bytes);
+
public fun sub_string(self: &String, i: u64, j: u64): String {
+    let bytes = &self.bytes;
+    let l = bytes.length();
     assert!(
         j <= l && i <= j && internal_is_char_boundary(bytes, i) && internal_is_char_boundary(bytes, j),
         EINVALID_INDEX
@@ -341,7 +341,7 @@ guaranteeing that the result is valid utf8.
 Computes the index of the first occurrence of a string. Returns length(s) if no occurrence found.
 
 
-
public fun index_of(s: &string::String, r: &string::String): u64
+
public fun index_of(self: &string::String, r: &string::String): u64
 
@@ -350,8 +350,8 @@ Computes the index of the first occurrence of a string. Returns

-public fun index_of(s: &String, r: &String): u64 {
-    internal_index_of(&s.bytes, &r.bytes)
+
public fun index_of(self: &String, r: &String): u64 {
+    internal_index_of(&self.bytes, &r.bytes)
 }
 
diff --git a/aptos-move/framework/move-stdlib/doc/vector.md b/aptos-move/framework/move-stdlib/doc/vector.md index eee7cac04edc8..89596d86036a6 100644 --- a/aptos-move/framework/move-stdlib/doc/vector.md +++ b/aptos-move/framework/move-stdlib/doc/vector.md @@ -24,6 +24,7 @@ the return on investment didn't seem worth it for these simple functions. - [Function `pop_back`](#0x1_vector_pop_back) - [Function `destroy_empty`](#0x1_vector_destroy_empty) - [Function `swap`](#0x1_vector_swap) +- [Function `move_range`](#0x1_vector_move_range) - [Function `singleton`](#0x1_vector_singleton) - [Function `reverse`](#0x1_vector_reverse) - [Function `reverse_slice`](#0x1_vector_reverse_slice) @@ -39,6 +40,7 @@ the return on investment didn't seem worth it for these simple functions. - [Function `remove`](#0x1_vector_remove) - [Function `remove_value`](#0x1_vector_remove_value) - [Function `swap_remove`](#0x1_vector_swap_remove) +- [Function `replace`](#0x1_vector_replace) - [Function `for_each`](#0x1_vector_for_each) - [Function `for_each_reverse`](#0x1_vector_for_each_reverse) - [Function `for_each_ref`](#0x1_vector_for_each_ref) @@ -86,7 +88,8 @@ the return on investment didn't seem worth it for these simple functions. - [Function `rotate_slice`](#@Specification_1_rotate_slice) -
+
use 0x1::mem;
+
@@ -145,6 +148,18 @@ The length of the vectors are not equal. + + +Whether to utilize native vector::move_range +Vector module cannot call features module, due to cyclic dependency, +so this is a constant. + + +
const USE_MOVE_RANGE: bool = true;
+
+ + + ## Function `empty` @@ -177,7 +192,7 @@ Return the length of the vector.
#[bytecode_instruction]
-public fun length<Element>(v: &vector<Element>): u64
+public fun length<Element>(self: &vector<Element>): u64
 
@@ -186,7 +201,7 @@ Return the length of the vector. Implementation -
native public fun length<Element>(v: &vector<Element>): u64;
+
native public fun length<Element>(self: &vector<Element>): u64;
 
@@ -197,12 +212,12 @@ Return the length of the vector. ## Function `borrow` -Acquire an immutable reference to the ith element of the vector v. +Acquire an immutable reference to the ith element of the vector self. Aborts if i is out of bounds.
#[bytecode_instruction]
-public fun borrow<Element>(v: &vector<Element>, i: u64): &Element
+public fun borrow<Element>(self: &vector<Element>, i: u64): &Element
 
@@ -211,7 +226,7 @@ Aborts if i is out of bounds. Implementation -
native public fun borrow<Element>(v: &vector<Element>, i: u64): ∈
+
native public fun borrow<Element>(self: &vector<Element>, i: u64): ∈
 
@@ -222,11 +237,11 @@ Aborts if i is out of bounds. ## Function `push_back` -Add element e to the end of the vector v. +Add element e to the end of the vector self.
#[bytecode_instruction]
-public fun push_back<Element>(v: &mut vector<Element>, e: Element)
+public fun push_back<Element>(self: &mut vector<Element>, e: Element)
 
@@ -235,7 +250,7 @@ Add element e to the end of the vector v. Implementation -
native public fun push_back<Element>(v: &mut vector<Element>, e: Element);
+
native public fun push_back<Element>(self: &mut vector<Element>, e: Element);
 
@@ -246,12 +261,12 @@ Add element e to the end of the vector v. ## Function `borrow_mut` -Return a mutable reference to the ith element in the vector v. +Return a mutable reference to the ith element in the vector self. Aborts if i is out of bounds.
#[bytecode_instruction]
-public fun borrow_mut<Element>(v: &mut vector<Element>, i: u64): &mut Element
+public fun borrow_mut<Element>(self: &mut vector<Element>, i: u64): &mut Element
 
@@ -260,7 +275,7 @@ Aborts if i is out of bounds. Implementation -
native public fun borrow_mut<Element>(v: &mut vector<Element>, i: u64): &mut Element;
+
native public fun borrow_mut<Element>(self: &mut vector<Element>, i: u64): &mut Element;
 
@@ -271,12 +286,12 @@ Aborts if i is out of bounds. ## Function `pop_back` -Pop an element from the end of vector v. -Aborts if v is empty. +Pop an element from the end of vector self. +Aborts if self is empty.
#[bytecode_instruction]
-public fun pop_back<Element>(v: &mut vector<Element>): Element
+public fun pop_back<Element>(self: &mut vector<Element>): Element
 
@@ -285,7 +300,7 @@ Aborts if v is empty. Implementation -
native public fun pop_back<Element>(v: &mut vector<Element>): Element;
+
native public fun pop_back<Element>(self: &mut vector<Element>): Element;
 
@@ -296,12 +311,12 @@ Aborts if v is empty. ## Function `destroy_empty` -Destroy the vector v. -Aborts if v is not empty. +Destroy the vector self. +Aborts if self is not empty.
#[bytecode_instruction]
-public fun destroy_empty<Element>(v: vector<Element>)
+public fun destroy_empty<Element>(self: vector<Element>)
 
@@ -310,7 +325,7 @@ Aborts if v is not empty. Implementation -
native public fun destroy_empty<Element>(v: vector<Element>);
+
native public fun destroy_empty<Element>(self: vector<Element>);
 
@@ -321,12 +336,12 @@ Aborts if v is not empty. ## Function `swap` -Swaps the elements at the ith and jth indices in the vector v. +Swaps the elements at the ith and jth indices in the vector self. Aborts if i or j is out of bounds.
#[bytecode_instruction]
-public fun swap<Element>(v: &mut vector<Element>, i: u64, j: u64)
+public fun swap<Element>(self: &mut vector<Element>, i: u64, j: u64)
 
@@ -335,7 +350,44 @@ Aborts if i or j is out of bounds. Implementation -
native public fun swap<Element>(v: &mut vector<Element>, i: u64, j: u64);
+
native public fun swap<Element>(self: &mut vector<Element>, i: u64, j: u64);
+
+
+
+
+## Function `move_range`
+
+Moves a range of elements [removal_position, removal_position + length) from vector from
+to vector to, inserting them starting at the insert_position.
+In the from vector, elements after the selected range are moved left to fill the hole
+(i.e. the range is removed, while the order of the rest of the elements is kept).
+In the to vector, elements after the insert_position are moved to the right to make
+space for the new elements (i.e. the range is inserted, while the order of the rest of the
+elements is kept).
+Move prevents having two mutable references to the same value, so the from and to
+vectors are always distinct.
+
+
public fun move_range<T>(from: &mut vector<T>, removal_position: u64, length: u64, to: &mut vector<T>, insert_position: u64)
+
+ + + +
+Implementation + + +
native public fun move_range<T>(
+    from: &mut vector<T>,
+    removal_position: u64,
+    length: u64,
+    to: &mut vector<T>,
+    insert_position: u64
+);
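
A sketch of what the new native does, assuming it is available in the VM being targeted; the concrete values and module address are illustrative only.

```move
#[test_only]
module 0x42::vector_move_range_examples {
    use std::vector;

    #[test]
    fun splice_between_vectors() {
        let from = vector[1u64, 2, 3, 4, 5];
        let to = vector[10u64, 20];
        // Remove two elements starting at index 1 of `from` (the values 2 and 3)
        // and insert them into `to` at index 1.
        vector::move_range(&mut from, 1, 2, &mut to, 1);
        assert!(from == vector[1, 4, 5], 0);
        assert!(to == vector[10, 2, 3, 20], 1);
    }
}
```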
 
@@ -360,7 +412,7 @@ Return an vector of size one containing element e.
public fun singleton<Element>(e: Element): vector<Element> {
     let v = empty();
-    push_back(&mut v, e);
+    v.push_back(e);
     v
 }
 
@@ -373,10 +425,10 @@ Return an vector of size one containing element e. ## Function `reverse` -Reverses the order of the elements in the vector v in place. +Reverses the order of the elements in the vector self in place. -
public fun reverse<Element>(v: &mut vector<Element>)
+
public fun reverse<Element>(self: &mut vector<Element>)
 
@@ -385,9 +437,9 @@ Reverses the order of the elements in the vector v in place. Implementation -
public fun reverse<Element>(v: &mut vector<Element>) {
-    let len = length(v);
-    reverse_slice(v, 0, len);
+
public fun reverse<Element>(self: &mut vector<Element>) {
+    let len = self.length();
+    self.reverse_slice(0, len);
 }
 
@@ -399,10 +451,10 @@ Reverses the order of the elements in the vector v in place. ## Function `reverse_slice` -Reverses the order of the elements [left, right) in the vector v in place. +Reverses the order of the elements [left, right) in the vector self in place. -
public fun reverse_slice<Element>(v: &mut vector<Element>, left: u64, right: u64)
+
public fun reverse_slice<Element>(self: &mut vector<Element>, left: u64, right: u64)
 
@@ -411,14 +463,14 @@ Reverses the order of the elements [left, right) in the vector v in Implementation -
public fun reverse_slice<Element>(v: &mut vector<Element>, left: u64, right: u64) {
+
public fun reverse_slice<Element>(self: &mut vector<Element>, left: u64, right: u64) {
     assert!(left <= right, EINVALID_RANGE);
     if (left == right) return;
-    right = right - 1;
+    right -= 1;
     while (left < right) {
-        swap(v, left, right);
-        left = left + 1;
-        right = right - 1;
+        self.swap(left, right);
+        left += 1;
+        right -= 1;
     }
 }
 
@@ -431,10 +483,10 @@ Reverses the order of the elements [left, right) in the vector v in ## Function `append` -Pushes all of the elements of the other vector into the lhs vector. +Pushes all of the elements of the other vector into the self vector. -
public fun append<Element>(lhs: &mut vector<Element>, other: vector<Element>)
+
public fun append<Element>(self: &mut vector<Element>, other: vector<Element>)
 
@@ -443,9 +495,16 @@ Pushes all of the elements of the other vector into the lhsImplementation -
public fun append<Element>(lhs: &mut vector<Element>, other: vector<Element>) {
-    reverse(&mut other);
-    reverse_append(lhs, other);
+
public fun append<Element>(self: &mut vector<Element>, other: vector<Element>) {
+    if (USE_MOVE_RANGE) {
+        let self_length = self.length();
+        let other_length = other.length();
+        move_range(&mut other, 0, other_length, self, self_length);
+        other.destroy_empty();
+    } else {
+        other.reverse();
+        self.reverse_append(other);
+    }
 }
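
Either branch yields the same observable result; a brief sketch with illustrative values:

```move
#[test_only]
module 0x42::vector_append_examples {
    use std::vector;

    #[test]
    fun append_keeps_order() {
        let v = vector[1u64, 2];
        // Elements of the second vector are appended in their original order.
        v.append(vector[3, 4]);
        assert!(v == vector[1, 2, 3, 4], 0);
    }
}
```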
 
@@ -457,10 +516,10 @@ Pushes all of the elements of the other vector into the lhsother vector into the lhs vector. +Pushes all of the elements of the other vector into the self vector. -
public fun reverse_append<Element>(lhs: &mut vector<Element>, other: vector<Element>)
+
public fun reverse_append<Element>(self: &mut vector<Element>, other: vector<Element>)
 
@@ -469,13 +528,13 @@ Pushes all of the elements of the other vector into the lhsImplementation -
public fun reverse_append<Element>(lhs: &mut vector<Element>, other: vector<Element>) {
-    let len = length(&other);
+
public fun reverse_append<Element>(self: &mut vector<Element>, other: vector<Element>) {
+    let len = other.length();
     while (len > 0) {
-        push_back(lhs, pop_back(&mut other));
-        len = len - 1;
+        self.push_back(other.pop_back());
+        len -= 1;
     };
-    destroy_empty(other);
+    other.destroy_empty();
 }
 
@@ -487,10 +546,14 @@ Pushes all of the elements of the other vector into the lhssplit_off. -
public fun trim<Element>(v: &mut vector<Element>, new_len: u64): vector<Element>
+
public fun trim<Element>(self: &mut vector<Element>, new_len: u64): vector<Element>
 
@@ -499,10 +562,22 @@ Trim a vector to a smaller size, returning the evicted elements in order Implementation -
public fun trim<Element>(v: &mut vector<Element>, new_len: u64): vector<Element> {
-    let res = trim_reverse(v, new_len);
-    reverse(&mut res);
-    res
+
public fun trim<Element>(self: &mut vector<Element>, new_len: u64): vector<Element> {
+    let len = self.length();
+    assert!(new_len <= len, EINDEX_OUT_OF_BOUNDS);
+
+    let other = empty();
+    if (USE_MOVE_RANGE) {
+        move_range(self, new_len, len - new_len, &mut other, 0);
+    } else {
+        while (len > new_len) {
+            other.push_back(self.pop_back());
+            len -= 1;
+        };
+        other.reverse();
+    };
+
+    other
 }
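
trim splits off the tail in order, while trim_reverse returns it reversed; both abort with EINDEX_OUT_OF_BOUNDS if new_len exceeds the current length. An illustrative sketch:

```move
#[test_only]
module 0x42::vector_trim_examples {
    use std::vector;

    #[test]
    fun trim_both_ways() {
        let v = vector[1u64, 2, 3, 4, 5];
        // Keep the first two elements; the evicted tail comes back in order.
        let tail = v.trim(2);
        assert!(v == vector[1, 2], 0);
        assert!(tail == vector[3, 4, 5], 1);

        // trim_reverse evicts the same way but returns the elements reversed.
        let rest = v.trim_reverse(0);
        assert!(rest == vector[2, 1], 2);
        assert!(v.is_empty(), 3);
    }
}
```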
 
@@ -517,7 +592,7 @@ Trim a vector to a smaller size, returning the evicted elements in order Trim a vector to a smaller size, returning the evicted elements in reverse order -
public fun trim_reverse<Element>(v: &mut vector<Element>, new_len: u64): vector<Element>
+
public fun trim_reverse<Element>(self: &mut vector<Element>, new_len: u64): vector<Element>
 
@@ -526,13 +601,13 @@ Trim a vector to a smaller size, returning the evicted elements in reverse order Implementation -
public fun trim_reverse<Element>(v: &mut vector<Element>, new_len: u64): vector<Element> {
-    let len = length(v);
+
public fun trim_reverse<Element>(self: &mut vector<Element>, new_len: u64): vector<Element> {
+    let len = self.length();
     assert!(new_len <= len, EINDEX_OUT_OF_BOUNDS);
     let result = empty();
     while (new_len < len) {
-        push_back(&mut result, pop_back(v));
-        len = len - 1;
+        result.push_back(self.pop_back());
+        len -= 1;
     };
     result
 }
@@ -546,10 +621,10 @@ Trim a vector to a smaller size, returning the evicted elements in reverse order
 
 ## Function `is_empty`
 
-Return true if the vector v has no elements and false otherwise.
+Return true if the vector self has no elements and false otherwise.
 
 
-
public fun is_empty<Element>(v: &vector<Element>): bool
+
public fun is_empty<Element>(self: &vector<Element>): bool
 
@@ -558,8 +633,8 @@ Return true if the vector v has no elements and Implementation -
public fun is_empty<Element>(v: &vector<Element>): bool {
-    length(v) == 0
+
public fun is_empty<Element>(self: &vector<Element>): bool {
+    self.length() == 0
 }
 
@@ -571,10 +646,10 @@ Return true if the vector v has no elements and ## Function `contains` -Return true if e is in the vector v. +Return true if e is in the vector self. -
public fun contains<Element>(v: &vector<Element>, e: &Element): bool
+
public fun contains<Element>(self: &vector<Element>, e: &Element): bool
 
@@ -583,12 +658,12 @@ Return true if e is in the vector v. Implementation -
public fun contains<Element>(v: &vector<Element>, e: &Element): bool {
+
public fun contains<Element>(self: &vector<Element>, e: &Element): bool {
     let i = 0;
-    let len = length(v);
+    let len = self.length();
     while (i < len) {
-        if (borrow(v, i) == e) return true;
-        i = i + 1;
+        if (self.borrow(i) == e) return true;
+        i += 1;
     };
     false
 }
@@ -602,11 +677,11 @@ Return true if e is in the vector v.
 
 ## Function `index_of`
 
-Return (true, i) if e is in the vector v at index i.
+Return (true, i) if e is in the vector self at index i.
 Otherwise, returns (false, 0).
 
 
-
public fun index_of<Element>(v: &vector<Element>, e: &Element): (bool, u64)
+
public fun index_of<Element>(self: &vector<Element>, e: &Element): (bool, u64)
 
@@ -615,12 +690,12 @@ Otherwise, returns (false, 0). Implementation -
public fun index_of<Element>(v: &vector<Element>, e: &Element): (bool, u64) {
+
public fun index_of<Element>(self: &vector<Element>, e: &Element): (bool, u64) {
     let i = 0;
-    let len = length(v);
+    let len = self.length();
     while (i < len) {
-        if (borrow(v, i) == e) return (true, i);
-        i = i + 1;
+        if (self.borrow(i) == e) return (true, i);
+        i += 1;
     };
     (false, 0)
 }
@@ -639,7 +714,7 @@ the predicate, only the index of the first one is returned.
 Otherwise, returns (false, 0).
 
 
-
public fun find<Element>(v: &vector<Element>, f: |&Element|bool): (bool, u64)
+
public fun find<Element>(self: &vector<Element>, f: |&Element|bool): (bool, u64)
 
@@ -648,19 +723,19 @@ Otherwise, returns (false, 0). Implementation -
public inline fun find<Element>(v: &vector<Element>, f: |&Element|bool): (bool, u64) {
+
public inline fun find<Element>(self: &vector<Element>, f: |&Element|bool): (bool, u64) {
     let find = false;
     let found_index = 0;
     let i = 0;
-    let len = length(v);
+    let len = self.length();
     while (i < len) {
         // Cannot call return in an inline function so we need to resort to break here.
-        if (f(borrow(v, i))) {
+        if (f(self.borrow(i))) {
             find = true;
             found_index = i;
             break
         };
-        i = i + 1;
+        i += 1;
     };
     (find, found_index)
 }
@@ -678,7 +753,7 @@ Insert a new element at position 0 <= i <= length, using O(length - i) time.
 Aborts if out of bounds.
 
 
-
public fun insert<Element>(v: &mut vector<Element>, i: u64, e: Element)
+
public fun insert<Element>(self: &mut vector<Element>, i: u64, e: Element)
 
@@ -687,13 +762,30 @@ Aborts if out of bounds. Implementation -
public fun insert<Element>(v: &mut vector<Element>, i: u64, e: Element) {
-    let len = length(v);
+
public fun insert<Element>(self: &mut vector<Element>, i: u64, e: Element) {
+    let len = self.length();
     assert!(i <= len, EINDEX_OUT_OF_BOUNDS);
-    push_back(v, e);
-    while (i < len) {
-        swap(v, i, len);
-        i = i + 1;
+
+    if (USE_MOVE_RANGE) {
+        if (i + 2 >= len) {
+            // When we are close to the end, it is cheaper to not create
+            // a temporary vector, and swap directly
+            self.push_back(e);
+            while (i < len) {
+                self.swap(i, len);
+                i += 1;
+            };
+        } else {
+            let other = singleton(e);
+            move_range(&mut other, 0, 1, self, i);
+            other.destroy_empty();
+        }
+    } else {
+        self.push_back(e);
+        while (i < len) {
+            self.swap(i, len);
+            i += 1;
+        };
     };
 }
 
@@ -706,12 +798,12 @@ Aborts if out of bounds. ## Function `remove` -Remove the ith element of the vector v, shifting all subsequent elements. +Remove the ith element of the vector self, shifting all subsequent elements. This is O(n) and preserves ordering of elements in the vector. Aborts if i is out of bounds. -
public fun remove<Element>(v: &mut vector<Element>, i: u64): Element
+
public fun remove<Element>(self: &mut vector<Element>, i: u64): Element
 
@@ -720,14 +812,30 @@ Aborts if i is out of bounds. Implementation -
public fun remove<Element>(v: &mut vector<Element>, i: u64): Element {
-    let len = length(v);
+
public fun remove<Element>(self: &mut vector<Element>, i: u64): Element {
+    let len = self.length();
     // i out of bounds; abort
     if (i >= len) abort EINDEX_OUT_OF_BOUNDS;
 
-    len = len - 1;
-    while (i < len) swap(v, i, { i = i + 1; i });
-    pop_back(v)
+    if (USE_MOVE_RANGE) {
+        // When we are close to the end, it is cheaper to not create
+        // a temporary vector, and swap directly
+        if (i + 3 >= len) {
+            len -= 1;
+            while (i < len) self.swap(i, { i += 1; i });
+            self.pop_back()
+        } else {
+            let other = empty();
+            move_range(self, i, 1, &mut other, 0);
+            let result = other.pop_back();
+            other.destroy_empty();
+            result
+        }
+    } else {
+        len -= 1;
+        while (i < len) self.swap(i, { i += 1; i });
+        self.pop_back()
+    }
 }
 
@@ -739,7 +847,7 @@ Aborts if i is out of bounds. ## Function `remove_value` -Remove the first occurrence of a given value in the vector v and return it in a vector, shifting all +Remove the first occurrence of a given value in the vector self and return it in a vector, shifting all subsequent elements. This is O(n) and preserves ordering of elements in the vector. This returns an empty vector if the value isn't present in the vector. @@ -747,7 +855,7 @@ Note that this cannot return an option as option uses vector and there'd be a ci and vector. -
public fun remove_value<Element>(v: &mut vector<Element>, val: &Element): vector<Element>
+
public fun remove_value<Element>(self: &mut vector<Element>, val: &Element): vector<Element>
 
@@ -756,12 +864,12 @@ and vector. Implementation -
public fun remove_value<Element>(v: &mut vector<Element>, val: &Element): vector<Element> {
+
public fun remove_value<Element>(self: &mut vector<Element>, val: &Element): vector<Element> {
     // This doesn't cost a O(2N) run time as index_of scans from left to right and stops when the element is found,
     // while remove would continue from the identified index to the end of the vector.
-    let (found, index) = index_of(v, val);
+    let (found, index) = self.index_of(val);
     if (found) {
-        vector[remove(v, index)]
+        vector[self.remove(index)]
     } else {
        vector[]
     }
@@ -776,12 +884,42 @@ and vector.
 
 ## Function `swap_remove`
 
-Swap the ith element of the vector v with the last element and then pop the vector.
+Swap the ith element of the vector self with the last element and then pop the vector.
 This is O(1), but does not preserve ordering of elements in the vector.
 Aborts if i is out of bounds.
 
 
-
public fun swap_remove<Element>(v: &mut vector<Element>, i: u64): Element
+
public fun swap_remove<Element>(self: &mut vector<Element>, i: u64): Element
+
+ + + +
+Implementation + + +
public fun swap_remove<Element>(self: &mut vector<Element>, i: u64): Element {
+    assert!(!self.is_empty(), EINDEX_OUT_OF_BOUNDS);
+    let last_idx = self.length() - 1;
+    self.swap(i, last_idx);
+    self.pop_back()
+}
+
+ + + +
+ + + +## Function `replace` + +Replace the ith element of the vector self with the given value, and return +to the caller the value that was there before. +Aborts if i is out of bounds. + + +
public fun replace<Element>(self: &mut vector<Element>, i: u64, val: Element): Element
 
@@ -790,11 +928,16 @@ Aborts if i is out of bounds. Implementation -
public fun swap_remove<Element>(v: &mut vector<Element>, i: u64): Element {
-    assert!(!is_empty(v), EINDEX_OUT_OF_BOUNDS);
-    let last_idx = length(v) - 1;
-    swap(v, i, last_idx);
-    pop_back(v)
+
public fun replace<Element>(self: &mut vector<Element>, i: u64, val: Element): Element {
+    let last_idx = self.length();
+    assert!(i < last_idx, EINDEX_OUT_OF_BOUNDS);
+    if (USE_MOVE_RANGE) {
+        mem::replace(self.borrow_mut(i), val)
+    } else {
+        self.push_back(val);
+        self.swap(i, last_idx);
+        self.pop_back()
+    }
 }
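
replace swaps a new value in and hands the old one back without disturbing the rest of the vector; a minimal sketch:

```move
#[test_only]
module 0x42::vector_replace_examples {
    use std::vector;

    #[test]
    fun replace_in_place() {
        let v = vector[1u64, 2, 3];
        let old = v.replace(1, 20);
        assert!(old == 2, 0);
        // Ordering of the untouched elements is preserved, unlike swap_remove.
        assert!(v == vector[1, 20, 3], 1);
    }
}
```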
 
@@ -809,7 +952,7 @@ Aborts if i is out of bounds. Apply the function to each element in the vector, consuming it. -
public fun for_each<Element>(v: vector<Element>, f: |Element|)
+
public fun for_each<Element>(self: vector<Element>, f: |Element|)
 
@@ -818,9 +961,9 @@ Apply the function to each element in the vector, consuming it. Implementation -
public inline fun for_each<Element>(v: vector<Element>, f: |Element|) {
-    reverse(&mut v); // We need to reverse the vector to consume it efficiently
-    for_each_reverse(v, |e| f(e));
+
public inline fun for_each<Element>(self: vector<Element>, f: |Element|) {
+    self.reverse(); // We need to reverse the vector to consume it efficiently
+    self.for_each_reverse(|e| f(e));
 }
 
@@ -835,7 +978,7 @@ Apply the function to each element in the vector, consuming it. Apply the function to each element in the vector, consuming it. -
public fun for_each_reverse<Element>(v: vector<Element>, f: |Element|)
+
public fun for_each_reverse<Element>(self: vector<Element>, f: |Element|)
 
@@ -844,13 +987,13 @@ Apply the function to each element in the vector, consuming it. Implementation -
public inline fun for_each_reverse<Element>(v: vector<Element>, f: |Element|) {
-    let len = length(&v);
+
public inline fun for_each_reverse<Element>(self: vector<Element>, f: |Element|) {
+    let len = self.length();
     while (len > 0) {
-        f(pop_back(&mut v));
-        len = len - 1;
+        f(self.pop_back());
+        len -= 1;
     };
-    destroy_empty(v)
+    self.destroy_empty()
 }
 
@@ -865,7 +1008,7 @@ Apply the function to each element in the vector, consuming it. Apply the function to a reference of each element in the vector. -
public fun for_each_ref<Element>(v: &vector<Element>, f: |&Element|)
+
public fun for_each_ref<Element>(self: &vector<Element>, f: |&Element|)
 
@@ -874,12 +1017,12 @@ Apply the function to a reference of each element in the vector. Implementation -
public inline fun for_each_ref<Element>(v: &vector<Element>, f: |&Element|) {
+
public inline fun for_each_ref<Element>(self: &vector<Element>, f: |&Element|) {
     let i = 0;
-    let len = length(v);
+    let len = self.length();
     while (i < len) {
-        f(borrow(v, i));
-        i = i + 1
+        f(self.borrow(i));
+        i += 1
     }
 }
 
@@ -895,7 +1038,7 @@ Apply the function to a reference of each element in the vector. Apply the function to each pair of elements in the two given vectors, consuming them. -
public fun zip<Element1, Element2>(v1: vector<Element1>, v2: vector<Element2>, f: |(Element1, Element2)|)
+
public fun zip<Element1, Element2>(self: vector<Element1>, v2: vector<Element2>, f: |(Element1, Element2)|)
 
@@ -904,11 +1047,11 @@ Apply the function to each pair of elements in the two given vectors, consuming Implementation -
public inline fun zip<Element1, Element2>(v1: vector<Element1>, v2: vector<Element2>, f: |Element1, Element2|) {
+
public inline fun zip<Element1, Element2>(self: vector<Element1>, v2: vector<Element2>, f: |Element1, Element2|) {
     // We need to reverse the vectors to consume it efficiently
-    reverse(&mut v1);
-    reverse(&mut v2);
-    zip_reverse(v1, v2, |e1, e2| f(e1, e2));
+    self.reverse();
+    v2.reverse();
+    self.zip_reverse(v2, |e1, e2| f(e1, e2));
 }
 
@@ -924,7 +1067,7 @@ Apply the function to each pair of elements in the two given vectors in the reve This errors out if the vectors are not of the same length. -
public fun zip_reverse<Element1, Element2>(v1: vector<Element1>, v2: vector<Element2>, f: |(Element1, Element2)|)
+
public fun zip_reverse<Element1, Element2>(self: vector<Element1>, v2: vector<Element2>, f: |(Element1, Element2)|)
 
@@ -934,20 +1077,20 @@ This errors out if the vectors are not of the same length.
public inline fun zip_reverse<Element1, Element2>(
-    v1: vector<Element1>,
+    self: vector<Element1>,
     v2: vector<Element2>,
     f: |Element1, Element2|,
 ) {
-    let len = length(&v1);
+    let len = self.length();
     // We can't use the constant EVECTORS_LENGTH_MISMATCH here as all calling code would then need to define it
     // due to how inline functions work.
-    assert!(len == length(&v2), 0x20002);
+    assert!(len == v2.length(), 0x20002);
     while (len > 0) {
-        f(pop_back(&mut v1), pop_back(&mut v2));
-        len = len - 1;
+        f(self.pop_back(), v2.pop_back());
+        len -= 1;
     };
-    destroy_empty(v1);
-    destroy_empty(v2);
+    self.destroy_empty();
+    v2.destroy_empty();
 }
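
zip and zip_reverse consume both vectors and abort with 0x20002 on a length mismatch; a small sketch with illustrative values:

```move
#[test_only]
module 0x42::vector_zip_examples {
    use std::vector;

    #[test]
    fun pairwise_sum() {
        let total = 0u64;
        // Pairs are (1,10), (2,20), (3,30); both vectors are consumed.
        vector[1u64, 2, 3].zip(vector[10u64, 20, 30], |a, b| total = total + a + b);
        assert!(total == 66, 0);
    }
}
```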
 
@@ -963,7 +1106,7 @@ Apply the function to the references of each pair of elements in the two given v This errors out if the vectors are not of the same length. -
public fun zip_ref<Element1, Element2>(v1: &vector<Element1>, v2: &vector<Element2>, f: |(&Element1, &Element2)|)
+
public fun zip_ref<Element1, Element2>(self: &vector<Element1>, v2: &vector<Element2>, f: |(&Element1, &Element2)|)
 
@@ -973,18 +1116,18 @@ This errors out if the vectors are not of the same length.
public inline fun zip_ref<Element1, Element2>(
-    v1: &vector<Element1>,
+    self: &vector<Element1>,
     v2: &vector<Element2>,
     f: |&Element1, &Element2|,
 ) {
-    let len = length(v1);
+    let len = self.length();
     // We can't use the constant EVECTORS_LENGTH_MISMATCH here as all calling code would then need to define it
     // due to how inline functions work.
-    assert!(len == length(v2), 0x20002);
+    assert!(len == v2.length(), 0x20002);
     let i = 0;
     while (i < len) {
-        f(borrow(v1, i), borrow(v2, i));
-        i = i + 1
+        f(self.borrow(i), v2.borrow(i));
+        i += 1
     }
 }
 
@@ -1000,7 +1143,7 @@ This errors out if the vectors are not of the same length. Apply the function to a reference of each element in the vector with its index. -
public fun enumerate_ref<Element>(v: &vector<Element>, f: |(u64, &Element)|)
+
public fun enumerate_ref<Element>(self: &vector<Element>, f: |(u64, &Element)|)
 
@@ -1009,12 +1152,12 @@ Apply the function to a reference of each element in the vector with its index. Implementation -
public inline fun enumerate_ref<Element>(v: &vector<Element>, f: |u64, &Element|) {
+
public inline fun enumerate_ref<Element>(self: &vector<Element>, f: |u64, &Element|) {
     let i = 0;
-    let len = length(v);
+    let len = self.length();
     while (i < len) {
-        f(i, borrow(v, i));
-        i = i + 1;
+        f(i, self.borrow(i));
+        i += 1;
     };
 }
 
@@ -1030,7 +1173,7 @@ Apply the function to a reference of each element in the vector with its index. Apply the function to a mutable reference to each element in the vector. -
public fun for_each_mut<Element>(v: &mut vector<Element>, f: |&mut Element|)
+
public fun for_each_mut<Element>(self: &mut vector<Element>, f: |&mut Element|)
 
@@ -1039,12 +1182,12 @@ Apply the function to a mutable reference to each element in the vector. Implementation -
public inline fun for_each_mut<Element>(v: &mut vector<Element>, f: |&mut Element|) {
+
public inline fun for_each_mut<Element>(self: &mut vector<Element>, f: |&mut Element|) {
     let i = 0;
-    let len = length(v);
+    let len = self.length();
     while (i < len) {
-        f(borrow_mut(v, i));
-        i = i + 1
+        f(self.borrow_mut(i));
+        i += 1
     }
 }
 
@@ -1061,7 +1204,7 @@ Apply the function to mutable references to each pair of elements in the two giv This errors out if the vectors are not of the same length. -
public fun zip_mut<Element1, Element2>(v1: &mut vector<Element1>, v2: &mut vector<Element2>, f: |(&mut Element1, &mut Element2)|)
+
public fun zip_mut<Element1, Element2>(self: &mut vector<Element1>, v2: &mut vector<Element2>, f: |(&mut Element1, &mut Element2)|)
 
@@ -1071,18 +1214,18 @@ This errors out if the vectors are not of the same length.
public inline fun zip_mut<Element1, Element2>(
-    v1: &mut vector<Element1>,
+    self: &mut vector<Element1>,
     v2: &mut vector<Element2>,
     f: |&mut Element1, &mut Element2|,
 ) {
     let i = 0;
-    let len = length(v1);
+    let len = self.length();
     // We can't use the constant EVECTORS_LENGTH_MISMATCH here as all calling code would then need to define it
     // due to how inline functions work.
-    assert!(len == length(v2), 0x20002);
+    assert!(len == v2.length(), 0x20002);
     while (i < len) {
-        f(borrow_mut(v1, i), borrow_mut(v2, i));
-        i = i + 1
+        f(self.borrow_mut(i), v2.borrow_mut(i));
+        i += 1
     }
 }
 
@@ -1098,7 +1241,7 @@ This errors out if the vectors are not of the same length. Apply the function to a mutable reference of each element in the vector with its index. -
public fun enumerate_mut<Element>(v: &mut vector<Element>, f: |(u64, &mut Element)|)
+
public fun enumerate_mut<Element>(self: &mut vector<Element>, f: |(u64, &mut Element)|)
 
@@ -1107,12 +1250,12 @@ Apply the function to a mutable reference of each element in the vector with its Implementation -
public inline fun enumerate_mut<Element>(v: &mut vector<Element>, f: |u64, &mut Element|) {
+
public inline fun enumerate_mut<Element>(self: &mut vector<Element>, f: |u64, &mut Element|) {
     let i = 0;
-    let len = length(v);
+    let len = self.length();
     while (i < len) {
-        f(i, borrow_mut(v, i));
-        i = i + 1;
+        f(i, self.borrow_mut(i));
+        i += 1;
     };
 }
 
@@ -1129,7 +1272,7 @@ Fold the function over the elements. For example, fold<Accumulator, Element>(v: vector<Element>, init: Accumulator, f: |(Accumulator, Element)|Accumulator): Accumulator +
public fun fold<Accumulator, Element>(self: vector<Element>, init: Accumulator, f: |(Accumulator, Element)|Accumulator): Accumulator
 
@@ -1139,12 +1282,12 @@ Fold the function over the elements. For example, fold

public inline fun fold<Accumulator, Element>(
-    v: vector<Element>,
+    self: vector<Element>,
     init: Accumulator,
     f: |Accumulator,Element|Accumulator
 ): Accumulator {
     let accu = init;
-    for_each(v, |elem| accu = f(accu, elem));
+    self.for_each(|elem| accu = f(accu, elem));
     accu
 }
@@ -1161,7 +1304,7 @@ Fold right like fold above but working right to left. For example, f(1, f(2, f(3, 0))) -
public fun foldr<Accumulator, Element>(v: vector<Element>, init: Accumulator, f: |(Element, Accumulator)|Accumulator): Accumulator
+
public fun foldr<Accumulator, Element>(self: vector<Element>, init: Accumulator, f: |(Element, Accumulator)|Accumulator): Accumulator
 
@@ -1171,12 +1314,12 @@ Fold right like fold above but working right to left. For example,

public inline fun foldr<Accumulator, Element>(
-    v: vector<Element>,
+    self: vector<Element>,
     init: Accumulator,
     f: |Element, Accumulator|Accumulator
 ): Accumulator {
     let accu = init;
-    for_each_reverse(v, |elem| accu = f(elem, accu));
+    self.for_each_reverse(|elem| accu = f(elem, accu));
     accu
 }
@@ -1193,7 +1336,7 @@ Map the function over the references of the elements of the vector, producing a original vector. -
public fun map_ref<Element, NewElement>(v: &vector<Element>, f: |&Element|NewElement): vector<NewElement>
+
public fun map_ref<Element, NewElement>(self: &vector<Element>, f: |&Element|NewElement): vector<NewElement>
 
@@ -1203,11 +1346,11 @@ original vector.
public inline fun map_ref<Element, NewElement>(
-    v: &vector<Element>,
+    self: &vector<Element>,
     f: |&Element|NewElement
 ): vector<NewElement> {
     let result = vector<NewElement>[];
-    for_each_ref(v, |elem| push_back(&mut result, f(elem)));
+    self.for_each_ref(|elem| result.push_back(f(elem)));
     result
 }
 
@@ -1224,7 +1367,7 @@ Map the function over the references of the element pairs of two vectors, produc values without modifying the original vectors. -
public fun zip_map_ref<Element1, Element2, NewElement>(v1: &vector<Element1>, v2: &vector<Element2>, f: |(&Element1, &Element2)|NewElement): vector<NewElement>
+
public fun zip_map_ref<Element1, Element2, NewElement>(self: &vector<Element1>, v2: &vector<Element2>, f: |(&Element1, &Element2)|NewElement): vector<NewElement>
 
@@ -1234,16 +1377,16 @@ values without modifying the original vectors.
public inline fun zip_map_ref<Element1, Element2, NewElement>(
-    v1: &vector<Element1>,
+    self: &vector<Element1>,
     v2: &vector<Element2>,
     f: |&Element1, &Element2|NewElement
 ): vector<NewElement> {
     // We can't use the constant EVECTORS_LENGTH_MISMATCH here as all calling code would then need to define it
     // due to how inline functions work.
-    assert!(length(v1) == length(v2), 0x20002);
+    assert!(self.length() == v2.length(), 0x20002);
 
     let result = vector<NewElement>[];
-    zip_ref(v1, v2, |e1, e2| push_back(&mut result, f(e1, e2)));
+    self.zip_ref(v2, |e1, e2| result.push_back(f(e1, e2)));
     result
 }
 
@@ -1259,7 +1402,7 @@ values without modifying the original vectors.
 Map the function over the elements of the vector, producing a new vector.
-
public fun map<Element, NewElement>(v: vector<Element>, f: |Element|NewElement): vector<NewElement>
+
public fun map<Element, NewElement>(self: vector<Element>, f: |Element|NewElement): vector<NewElement>
 
@@ -1269,11 +1412,11 @@ Map the function over the elements of the vector, producing a new vector.
public inline fun map<Element, NewElement>(
-    v: vector<Element>,
+    self: vector<Element>,
     f: |Element|NewElement
 ): vector<NewElement> {
     let result = vector<NewElement>[];
-    for_each(v, |elem| push_back(&mut result, f(elem)));
+    self.for_each(|elem| result.push_back(f(elem)));
     result
 }
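For illustration only (not part of this diff): a minimal usage sketch of `map` with the receiver syntax introduced here, in an invented test module.

```move
#[test_only]
module 0x42::vector_map_example {
    #[test]
    fun map_builds_a_new_vector() {
        let v = vector<u64>[1, 2, 3];
        // `map` consumes `v` and collects the mapped values into a fresh vector.
        let doubled = v.map(|e| e * 2);
        assert!(doubled == vector<u64>[2, 4, 6], 0);
    }
}
```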
 
@@ -1289,7 +1432,7 @@ Map the function over the elements of the vector, producing a new vector.
 Map the function over the element pairs of the two vectors, producing a new vector.
-
public fun zip_map<Element1, Element2, NewElement>(v1: vector<Element1>, v2: vector<Element2>, f: |(Element1, Element2)|NewElement): vector<NewElement>
+
public fun zip_map<Element1, Element2, NewElement>(self: vector<Element1>, v2: vector<Element2>, f: |(Element1, Element2)|NewElement): vector<NewElement>
 
@@ -1299,16 +1442,16 @@ Map the function over the element pairs of the two vectors, producing a new vect
public inline fun zip_map<Element1, Element2, NewElement>(
-    v1: vector<Element1>,
+    self: vector<Element1>,
     v2: vector<Element2>,
     f: |Element1, Element2|NewElement
 ): vector<NewElement> {
     // We can't use the constant EVECTORS_LENGTH_MISMATCH here as all calling code would then need to define it
     // due to how inline functions work.
-    assert!(length(&v1) == length(&v2), 0x20002);
+    assert!(self.length() == v2.length(), 0x20002);
 
     let result = vector<NewElement>[];
-    zip(v1, v2, |e1, e2| push_back(&mut result, f(e1, e2)));
+    self.zip(v2, |e1, e2| result.push_back(f(e1, e2)));
     result
 }
 
@@ -1324,7 +1467,7 @@ Map the function over the element pairs of the two vectors, producing a new vector.
 Filter the vector using the boolean function, removing all elements for which p(e) is not true.
-
public fun filter<Element: drop>(v: vector<Element>, p: |&Element|bool): vector<Element>
+
public fun filter<Element: drop>(self: vector<Element>, p: |&Element|bool): vector<Element>
 
@@ -1334,12 +1477,12 @@ Filter the vector using the boolean function, removing all elements for which public inline fun filter<Element:drop>( - v: vector<Element>, + self: vector<Element>, p: |&Element|bool ): vector<Element> { let result = vector<Element>[]; - for_each(v, |elem| { - if (p(&elem)) push_back(&mut result, elem); + self.for_each(|elem| { + if (p(&elem)) result.push_back(elem); }); result } @@ -1358,7 +1501,7 @@ Preserves the relative order of the elements for which pred is true, BUT NOT for the elements for which pred is false. -
public fun partition<Element>(v: &mut vector<Element>, pred: |&Element|bool): u64
+
public fun partition<Element>(self: &mut vector<Element>, pred: |&Element|bool): u64
 
@@ -1368,23 +1511,23 @@ BUT NOT for the elements for which pred is false.
public inline fun partition<Element>(
-    v: &mut vector<Element>,
+    self: &mut vector<Element>,
     pred: |&Element|bool
 ): u64 {
     let i = 0;
-    let len = length(v);
+    let len = self.length();
     while (i < len) {
-        if (!pred(borrow(v, i))) break;
-        i = i + 1;
+        if (!pred(self.borrow(i))) break;
+        i += 1;
     };
     let p = i;
-    i = i + 1;
+    i += 1;
     while (i < len) {
-        if (pred(borrow(v, i))) {
-            swap(v, p, i);
-            p = p + 1;
+        if (pred(self.borrow(i))) {
+            self.swap(p, i);
+            p += 1;
         };
-        i = i + 1;
+        i += 1;
     };
     p
 }
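Not part of this change: a hypothetical test sketch (invented module and values) of `partition`, tracing the implementation above; note that only the elements satisfying the predicate are guaranteed to keep their relative order.

```move
#[test_only]
module 0x42::vector_partition_example {
    #[test]
    fun partition_groups_matching_elements_first() {
        let v = vector<u64>[1, 4, 2, 5, 3];
        // Elements with pred == true end up in [0, p); only that side keeps its
        // original relative order.
        let p = v.partition(|e| *e % 2 == 0);
        assert!(p == 2, 0);
        assert!(v.slice(0, p) == vector<u64>[4, 2], 1);
    }
}
```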
@@ -1402,7 +1545,7 @@ rotate(&mut [1, 2, 3, 4, 5], 2) -> [3, 4, 5, 1, 2] in place, returns the split point
 ie. 3 in the example above
 
 
-
public fun rotate<Element>(v: &mut vector<Element>, rot: u64): u64
+
public fun rotate<Element>(self: &mut vector<Element>, rot: u64): u64
 
@@ -1412,11 +1555,11 @@ ie. 3 in the example above
public fun rotate<Element>(
-    v: &mut vector<Element>,
+    self: &mut vector<Element>,
     rot: u64
 ): u64 {
-    let len = length(v);
-    rotate_slice(v, 0, rot, len)
+    let len = self.length();
+    self.rotate_slice(0, rot, len)
 }
 
@@ -1432,7 +1575,7 @@ Same as above but on a sub-slice of an array [left, right) with left <= rot <= right
 returns the
-
public fun rotate_slice<Element>(v: &mut vector<Element>, left: u64, rot: u64, right: u64): u64
+
public fun rotate_slice<Element>(self: &mut vector<Element>, left: u64, rot: u64, right: u64): u64
 
@@ -1442,14 +1585,14 @@ returns the
public fun rotate_slice<Element>(
-    v: &mut vector<Element>,
+    self: &mut vector<Element>,
     left: u64,
     rot: u64,
     right: u64
 ): u64 {
-    reverse_slice(v, left, rot);
-    reverse_slice(v, rot, right);
-    reverse_slice(v, left, right);
+    self.reverse_slice(left, rot);
+    self.reverse_slice(rot, right);
+    self.reverse_slice(left, right);
     left + (right - rot)
 }
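For illustration only (not part of this diff): a short, hypothetical test reproducing the documented `rotate` example, assuming the receiver-style `std::vector` API above.

```move
#[test_only]
module 0x42::vector_rotate_example {
    #[test]
    fun rotate_matches_the_documented_example() {
        let v = vector<u64>[1, 2, 3, 4, 5];
        // rotate(2) performs the three slice reversals shown above and returns the new split point.
        let split = v.rotate(2);
        assert!(split == 3, 0);
        assert!(v == vector<u64>[3, 4, 5, 1, 2], 1);
    }
}
```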
 
@@ -1466,7 +1609,7 @@ Partition the array based on a predicate p, this routine is stable and thus
 preserves the relative order of the elements in the two partitions.
-
public fun stable_partition<Element>(v: &mut vector<Element>, p: |&Element|bool): u64
+
public fun stable_partition<Element>(self: &mut vector<Element>, p: |&Element|bool): u64
 
@@ -1476,24 +1619,24 @@ preserves the relative order of the elements in the two partitions.
public inline fun stable_partition<Element>(
-    v: &mut vector<Element>,
+    self: &mut vector<Element>,
     p: |&Element|bool
 ): u64 {
-    let len = length(v);
+    let len = self.length();
     let t = empty();
     let f = empty();
     while (len > 0) {
-        let e = pop_back(v);
+        let e = self.pop_back();
         if (p(&e)) {
-            push_back(&mut t, e);
+            t.push_back(e);
         } else {
-            push_back(&mut f, e);
+            f.push_back(e);
         };
-        len = len - 1;
+        len -= 1;
     };
-    let pos = length(&t);
-    reverse_append(v, t);
-    reverse_append(v, f);
+    let pos = t.length();
+    self.reverse_append(t);
+    self.reverse_append(f);
     pos
 }
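Not part of this change: a hypothetical test sketch (invented module and values) tracing the `stable_partition` implementation above, to contrast it with `partition`.

```move
#[test_only]
module 0x42::vector_stable_partition_example {
    #[test]
    fun stable_partition_keeps_both_orders() {
        let v = vector<u64>[1, 4, 2, 5, 3];
        // Unlike `partition`, both sides keep their original relative order.
        let p = v.stable_partition(|e| *e % 2 == 0);
        assert!(p == 2, 0);
        assert!(v == vector<u64>[4, 2, 1, 5, 3], 1);
    }
}
```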
 
@@ -1509,7 +1652,7 @@ preserves the relative order of the elements in the two partitions.
 Return true if any element in the vector satisfies the predicate.
-
public fun any<Element>(v: &vector<Element>, p: |&Element|bool): bool
+
public fun any<Element>(self: &vector<Element>, p: |&Element|bool): bool
 
@@ -1519,17 +1662,17 @@ Return true if any element in the vector satisfies the predicate.
public inline fun any<Element>(
-    v: &vector<Element>,
+    self: &vector<Element>,
     p: |&Element|bool
 ): bool {
     let result = false;
     let i = 0;
-    while (i < length(v)) {
-        result = p(borrow(v, i));
+    while (i < self.length()) {
+        result = p(self.borrow(i));
         if (result) {
             break
         };
-        i = i + 1
+        i += 1
     };
     result
 }
@@ -1546,7 +1689,7 @@ Return true if any element in the vector satisfies the predicate.
 Return true if all elements in the vector satisfy the predicate.
 
 
-
public fun all<Element>(v: &vector<Element>, p: |&Element|bool): bool
+
public fun all<Element>(self: &vector<Element>, p: |&Element|bool): bool
 
@@ -1556,17 +1699,17 @@ Return true if all elements in the vector satisfy the predicate.
public inline fun all<Element>(
-    v: &vector<Element>,
+    self: &vector<Element>,
     p: |&Element|bool
 ): bool {
     let result = true;
     let i = 0;
-    while (i < length(v)) {
-        result = p(borrow(v, i));
+    while (i < self.length()) {
+        result = p(self.borrow(i));
         if (!result) {
             break
         };
-        i = i + 1
+        i += 1
     };
     result
 }
@@ -1584,7 +1727,7 @@ Destroy a vector, just a wrapper around for_each_reverse with a descriptive name
 when used in the context of destroying a vector.
 
 
-
public fun destroy<Element>(v: vector<Element>, d: |Element|)
+
public fun destroy<Element>(self: vector<Element>, d: |Element|)
 
@@ -1594,10 +1737,10 @@ when used in the context of destroying a vector.
public inline fun destroy<Element>(
-    v: vector<Element>,
+    self: vector<Element>,
     d: |Element|
 ) {
-    for_each_reverse(v, |e| d(e))
+    self.for_each_reverse(|e| d(e))
 }
 
@@ -1649,8 +1792,8 @@ when used in the context of destroying a vector. let vec = vector[]; while (start < end) { - push_back(&mut vec, start); - start = start + step; + vec.push_back(start); + start += step; }; vec } @@ -1666,7 +1809,7 @@ when used in the context of destroying a vector. -
public fun slice<Element: copy>(v: &vector<Element>, start: u64, end: u64): vector<Element>
+
public fun slice<Element: copy>(self: &vector<Element>, start: u64, end: u64): vector<Element>
 
@@ -1676,16 +1819,16 @@ when used in the context of destroying a vector.
public fun slice<Element: copy>(
-    v: &vector<Element>,
+    self: &vector<Element>,
     start: u64,
     end: u64
 ): vector<Element> {
-    assert!(start <= end && end <= length(v), EINVALID_SLICE_RANGE);
+    assert!(start <= end && end <= self.length(), EINVALID_SLICE_RANGE);
 
     let vec = vector[];
     while (start < end) {
-        push_back(&mut vec, *borrow(v, start));
-        start = start + 1;
+        vec.push_back(self[start]);
+        start += 1;
     };
     vec
 }
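For illustration only (not part of this diff): a minimal, hypothetical test of `slice`, which copies the half-open range `[start, end)` and aborts on an invalid range per the assertion above.

```move
#[test_only]
module 0x42::vector_slice_example {
    #[test]
    fun slice_copies_a_half_open_range() {
        let v = vector<u64>[1, 2, 3, 4, 5];
        // Copies the elements in [1, 4); the original vector is left untouched.
        assert!(v.slice(1, 4) == vector<u64>[2, 3, 4], 0);
    }
}
```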
@@ -1706,43 +1849,43 @@ when used in the context of destroying a vector.
 ### Helper Functions
 
 
-Check if v1 is equal to the result of adding e at the end of v2
+Check if self is equal to the result of adding e at the end of v2
 
 
 
 
 
-
fun eq_push_back<Element>(v1: vector<Element>, v2: vector<Element>, e: Element): bool {
-    len(v1) == len(v2) + 1 &&
-    v1[len(v1)-1] == e &&
-    v1[0..len(v1)-1] == v2[0..len(v2)]
+
fun eq_push_back<Element>(self: vector<Element>, v2: vector<Element>, e: Element): bool {
+    len(self) == len(v2) + 1 &&
+    self[len(self)-1] == e &&
+    self[0..len(self)-1] == v2[0..len(v2)]
 }
 
-Check if v is equal to the result of concatenating v1 and v2
+Check if self is equal to the result of concatenating v1 and v2
-
fun eq_append<Element>(v: vector<Element>, v1: vector<Element>, v2: vector<Element>): bool {
-    len(v) == len(v1) + len(v2) &&
-    v[0..len(v1)] == v1 &&
-    v[len(v1)..len(v)] == v2
+
fun eq_append<Element>(self: vector<Element>, v1: vector<Element>, v2: vector<Element>): bool {
+    len(self) == len(v1) + len(v2) &&
+    self[0..len(v1)] == v1 &&
+    self[len(v1)..len(self)] == v2
 }
 
-Check v1 is equal to the result of removing the first element of v2
+Check self is equal to the result of removing the first element of v2
-
fun eq_pop_front<Element>(v1: vector<Element>, v2: vector<Element>): bool {
-    len(v1) + 1 == len(v2) &&
-    v1 == v2[1..len(v2)]
+
fun eq_pop_front<Element>(self: vector<Element>, v2: vector<Element>): bool {
+    len(self) + 1 == len(v2) &&
+    self == v2[1..len(v2)]
 }
 
@@ -1761,14 +1904,14 @@ Check that v1 is equal to the result of removing the element at ind
-Check if v contains e.
+Check if self contains e.
-
fun spec_contains<Element>(v: vector<Element>, e: Element): bool {
-    exists x in v: x == e
+
fun spec_contains<Element>(self: vector<Element>, e: Element): bool {
+    exists x in self: x == e
 }
 
@@ -1796,7 +1939,7 @@ Check if v contains e.
 ### Function `reverse`
-
public fun reverse<Element>(v: &mut vector<Element>)
+
public fun reverse<Element>(self: &mut vector<Element>)
 
@@ -1812,7 +1955,7 @@ Check if v contains e.
 ### Function `reverse_slice`
-
public fun reverse_slice<Element>(v: &mut vector<Element>, left: u64, right: u64)
+
public fun reverse_slice<Element>(self: &mut vector<Element>, left: u64, right: u64)
 
@@ -1828,7 +1971,7 @@ Check if v contains e.
 ### Function `append`
-
public fun append<Element>(lhs: &mut vector<Element>, other: vector<Element>)
+
public fun append<Element>(self: &mut vector<Element>, other: vector<Element>)
 
@@ -1844,7 +1987,7 @@ Check if v contains e.
 ### Function `reverse_append`
-
public fun reverse_append<Element>(lhs: &mut vector<Element>, other: vector<Element>)
+
public fun reverse_append<Element>(self: &mut vector<Element>, other: vector<Element>)
 
@@ -1860,7 +2003,7 @@ Check if v contains e.
 ### Function `trim`
-
public fun trim<Element>(v: &mut vector<Element>, new_len: u64): vector<Element>
+
public fun trim<Element>(self: &mut vector<Element>, new_len: u64): vector<Element>
 
@@ -1876,7 +2019,7 @@ Check if v contains e.
 ### Function `trim_reverse`
-
public fun trim_reverse<Element>(v: &mut vector<Element>, new_len: u64): vector<Element>
+
public fun trim_reverse<Element>(self: &mut vector<Element>, new_len: u64): vector<Element>
 
@@ -1892,7 +2035,7 @@ Check if v contains e.
 ### Function `is_empty`
-
public fun is_empty<Element>(v: &vector<Element>): bool
+
public fun is_empty<Element>(self: &vector<Element>): bool
 
@@ -1908,7 +2051,7 @@ Check if v contains e.
 ### Function `contains`
-
public fun contains<Element>(v: &vector<Element>, e: &Element): bool
+
public fun contains<Element>(self: &vector<Element>, e: &Element): bool
 
@@ -1924,7 +2067,7 @@ Check if v contains e.
 ### Function `index_of`
-
public fun index_of<Element>(v: &vector<Element>, e: &Element): (bool, u64)
+
public fun index_of<Element>(self: &vector<Element>, e: &Element): (bool, u64)
 
@@ -1940,7 +2083,7 @@ Check if v contains e.
 ### Function `insert`
-
public fun insert<Element>(v: &mut vector<Element>, i: u64, e: Element)
+
public fun insert<Element>(self: &mut vector<Element>, i: u64, e: Element)
 
@@ -1956,7 +2099,7 @@ Check if v contains e.
 ### Function `remove`
-
public fun remove<Element>(v: &mut vector<Element>, i: u64): Element
+
public fun remove<Element>(self: &mut vector<Element>, i: u64): Element
 
@@ -1972,7 +2115,7 @@ Check if v contains e.
 ### Function `remove_value`
-
public fun remove_value<Element>(v: &mut vector<Element>, val: &Element): vector<Element>
+
public fun remove_value<Element>(self: &mut vector<Element>, val: &Element): vector<Element>
 
@@ -1988,7 +2131,7 @@ Check if v contains e.
 ### Function `swap_remove`
-
public fun swap_remove<Element>(v: &mut vector<Element>, i: u64): Element
+
public fun swap_remove<Element>(self: &mut vector<Element>, i: u64): Element
 
@@ -2004,7 +2147,7 @@ Check if v contains e.
 ### Function `rotate`
-
public fun rotate<Element>(v: &mut vector<Element>, rot: u64): u64
+
public fun rotate<Element>(self: &mut vector<Element>, rot: u64): u64
 
@@ -2020,7 +2163,7 @@ Check if v contains e.
 ### Function `rotate_slice`
-
public fun rotate_slice<Element>(v: &mut vector<Element>, left: u64, rot: u64, right: u64): u64
+
public fun rotate_slice<Element>(self: &mut vector<Element>, left: u64, rot: u64, right: u64): u64
 
diff --git a/aptos-move/framework/move-stdlib/doc/vector_ext.md b/aptos-move/framework/move-stdlib/doc/vector_ext.md new file mode 100644 index 0000000000000..1fa207f777813 --- /dev/null +++ b/aptos-move/framework/move-stdlib/doc/vector_ext.md @@ -0,0 +1,201 @@ + + + +# Module `0x1::vector_ext` + + + +- [Constants](#@Constants_0) +- [Function `range_move`](#0x1_vector_ext_range_move) +- [Function `split_off`](#0x1_vector_ext_split_off) +- [Function `append`](#0x1_vector_ext_append) +- [Function `insert`](#0x1_vector_ext_insert) +- [Function `remove`](#0x1_vector_ext_remove) + + +
use 0x1::vector;
+
+ + + + + +## Constants + + + + +The index into the vector is out of bounds + + +
const EINDEX_OUT_OF_BOUNDS: u64 = 131072;
+
+ + + + + +## Function `range_move` + + + +
public fun range_move<T>(from: &mut vector<T>, removal_position: u64, length: u64, to: &mut vector<T>, insert_position: u64)
+
+ + + +
+Implementation + + +
public native fun range_move<T>(from: &mut vector<T>, removal_position: u64, length: u64, to: &mut vector<T>, insert_position: u64);
+
+ + + +
+ + + +## Function `split_off` + +Splits the collection into two at the given index. +Returns a newly allocated vector containing the elements in the range [at, len). +After the call, the original vector will be left containing the elements [0, at) +with its previous capacity unchanged. + + +
public fun split_off<Element>(self: &mut vector<Element>, at: u64): vector<Element>
+
+ + + +
+Implementation + + +
public fun split_off<Element>(self: &mut vector<Element>, at: u64): vector<Element> {
+    let len = vector::length(self);
+    assert!(at <= len, EINDEX_OUT_OF_BOUNDS);
+
+    let other = vector::empty();
+    range_move(self, at, len - at, &mut other, 0);
+
+    // let other = empty();
+    // while (len > at) {
+    //     push_back(&mut other, pop_back(self));
+    //     len = len - 1;
+    // };
+    // reverse(&mut other);
+    other
+}
+
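Not part of this new file: a small, hypothetical test sketch (module address, name, and values invented) of how `split_off` behaves, assuming the `std::vector_ext` module introduced above is available.

```move
#[test_only]
module 0x42::vector_ext_split_off_example {
    use std::vector_ext;

    #[test]
    fun split_off_keeps_the_prefix() {
        let v = vector<u64>[10, 20, 30, 40];
        // `v` keeps [0, at); the returned vector receives [at, len).
        let tail = vector_ext::split_off(&mut v, 2);
        assert!(v == vector<u64>[10, 20], 0);
        assert!(tail == vector<u64>[30, 40], 1);
    }
}
```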
+ + + +
+ + + +## Function `append` + +Pushes all of the elements of the other vector into the self vector. + + +
public fun append<Element>(self: &mut vector<Element>, other: vector<Element>)
+
+ + + +
+Implementation + + +
public fun append<Element>(self: &mut vector<Element>, other: vector<Element>) {
+    let self_length = self.length();
+    let other_length = other.length();
+    range_move(&mut other, 0, other_length, self, self_length);
+    other.destroy_empty();
+    // reverse(&mut other);
+    // reverse_append(self, other);
+}
+
+ + + +
+ + + +## Function `insert` + + + +
public fun insert<Element>(self: &mut vector<Element>, i: u64, e: Element)
+
+ + + +
+Implementation + + +
public fun insert<Element>(self: &mut vector<Element>, i: u64, e: Element) {
+    let len = self.length();
+    assert!(i <= len, EINDEX_OUT_OF_BOUNDS);
+
+    if (i == len) {
+        self.push_back(e);
+    } else {
+        let other = vector::singleton(e);
+        range_move(&mut other, 0, 1, self, i);
+        other.destroy_empty();
+    }
+}
+
+ + + +
+ + + +## Function `remove` + +Remove the ith element of the vector self, shifting all subsequent elements. +This is O(n) and preserves ordering of elements in the vector. +Aborts if i is out of bounds. + + +
public fun remove<Element>(self: &mut vector<Element>, i: u64): Element
+
+ + + +
+Implementation + + +
public fun remove<Element>(self: &mut vector<Element>, i: u64): Element {
+    let len = self.length();
+    // i out of bounds; abort
+    if (i >= len) abort EINDEX_OUT_OF_BOUNDS;
+
+    if (i + 1 == len) {
+        self.pop_back()
+    } else {
+        let other = vector::empty();
+        range_move(self, i, 1, &mut other, 0);
+        let result = other.pop_back();
+        other.destroy_empty();
+        result
+    }
+}
+
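For illustration only (not part of this new file): a hypothetical test sketch of `vector_ext::remove`, with invented module name and values.

```move
#[test_only]
module 0x42::vector_ext_remove_example {
    use std::vector_ext;

    #[test]
    fun remove_shifts_later_elements_left() {
        let v = vector<u64>[10, 20, 30];
        // Removes the element at index 1; later elements keep their order.
        let removed = vector_ext::remove(&mut v, 1);
        assert!(removed == 20, 0);
        assert!(v == vector<u64>[10, 30], 1);
    }
}
```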
+ + + +
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/move-stdlib/sources/acl.move b/aptos-move/framework/move-stdlib/sources/acl.move index 5cf71e635e182..76d718b94b2c6 100644 --- a/aptos-move/framework/move-stdlib/sources/acl.move +++ b/aptos-move/framework/move-stdlib/sources/acl.move @@ -22,25 +22,25 @@ module std::acl { } /// Add the address to the ACL. - public fun add(acl: &mut ACL, addr: address) { - assert!(!vector::contains(&mut acl.list, &addr), error::invalid_argument(ECONTAIN)); - vector::push_back(&mut acl.list, addr); + public fun add(self: &mut ACL, addr: address) { + assert!(!self.list.contains(&addr), error::invalid_argument(ECONTAIN)); + self.list.push_back(addr); } /// Remove the address from the ACL. - public fun remove(acl: &mut ACL, addr: address) { - let (found, index) = vector::index_of(&mut acl.list, &addr); + public fun remove(self: &mut ACL, addr: address) { + let (found, index) = self.list.index_of(&addr); assert!(found, error::invalid_argument(ENOT_CONTAIN)); - vector::remove(&mut acl.list, index); + self.list.remove(index); } /// Return true iff the ACL contains the address. - public fun contains(acl: &ACL, addr: address): bool { - vector::contains(&acl.list, &addr) + public fun contains(self: &ACL, addr: address): bool { + self.list.contains(&addr) } /// assert! that the ACL has the address. - public fun assert_contains(acl: &ACL, addr: address) { - assert!(contains(acl, addr), error::invalid_argument(ENOT_CONTAIN)); + public fun assert_contains(self: &ACL, addr: address) { + assert!(self.contains(addr), error::invalid_argument(ENOT_CONTAIN)); } } diff --git a/aptos-move/framework/move-stdlib/sources/acl.spec.move b/aptos-move/framework/move-stdlib/sources/acl.spec.move index 843496f4e8848..dcbb93e0b1f7f 100644 --- a/aptos-move/framework/move-stdlib/sources/acl.spec.move +++ b/aptos-move/framework/move-stdlib/sources/acl.spec.move @@ -3,25 +3,25 @@ spec std::acl { invariant forall i in 0..len(list), j in 0..len(list): list[i] == list[j] ==> i == j; } - spec fun spec_contains(acl: ACL, addr: address): bool { - exists a in acl.list: a == addr + spec fun spec_contains(self: ACL, addr: address): bool { + exists a in self.list: a == addr } - spec contains(acl: &ACL, addr: address): bool { - ensures result == spec_contains(acl, addr); + spec contains(self: &ACL, addr: address): bool { + ensures result == spec_contains(self, addr); } - spec add(acl: &mut ACL, addr: address) { - aborts_if spec_contains(acl, addr) with error::INVALID_ARGUMENT; - ensures spec_contains(acl, addr); + spec add(self: &mut ACL, addr: address) { + aborts_if spec_contains(self, addr) with error::INVALID_ARGUMENT; + ensures spec_contains(self, addr); } - spec remove(acl: &mut ACL, addr: address) { - aborts_if !spec_contains(acl, addr) with error::INVALID_ARGUMENT; - ensures !spec_contains(acl, addr); + spec remove(self: &mut ACL, addr: address) { + aborts_if !spec_contains(self, addr) with error::INVALID_ARGUMENT; + ensures !spec_contains(self, addr); } - spec assert_contains(acl: &ACL, addr: address) { - aborts_if !spec_contains(acl, addr) with error::INVALID_ARGUMENT; + spec assert_contains(self: &ACL, addr: address) { + aborts_if !spec_contains(self, addr) with error::INVALID_ARGUMENT; } } diff --git a/aptos-move/framework/move-stdlib/sources/bcs.move b/aptos-move/framework/move-stdlib/sources/bcs.move index 79b4c988906f2..1113a02c9c269 100644 --- a/aptos-move/framework/move-stdlib/sources/bcs.move +++ b/aptos-move/framework/move-stdlib/sources/bcs.move @@ 
-3,9 +3,30 @@ /// published on-chain. See https://github.com/aptos-labs/bcs#binary-canonical-serialization-bcs for more /// details on BCS. module std::bcs { - /// Return the binary representation of `v` in BCS (Binary Canonical Serialization) format + use std::option::Option; + + /// Note: all natives would fail if the MoveValue contains a permissioned signer in it. + + /// Returns the binary representation of `v` in BCS (Binary Canonical Serialization) format. + /// Aborts with `0x1c5` error code if serialization fails. native public fun to_bytes(v: &MoveValue): vector; + /// Returns the size of the binary representation of `v` in BCS (Binary Canonical Serialization) format. + /// Aborts with `0x1c5` error code if there is a failure when calculating serialized size. + native public fun serialized_size(v: &MoveValue): u64; + + /// If the type has known constant (always the same, independent of instance) serialized size + /// in BCS (Binary Canonical Serialization) format, returns it, otherwise returns None. + /// Aborts with `0x1c5` error code if there is a failure when calculating serialized size. + /// + /// Note: + /// For some types it might not be known they have constant size, and function might return None. + /// For example, signer appears to have constant size, but it's size might change. + /// If this function returned Some() for some type before - it is guaranteed to continue returning Some(). + /// On the other hand, if function has returned None for some type, + /// it might change in the future to return Some() instead, if size becomes "known". + native public fun constant_serialized_size(): Option; + // ============================== // Module Specification spec module {} // switch to module documentation context @@ -14,4 +35,14 @@ module std::bcs { /// Native function which is defined in the prover's prelude. native fun serialize(v: &MoveValue): vector; } + + spec serialized_size(v: &MoveValue): u64 { + pragma opaque; + ensures result == len(serialize(v)); + } + + spec constant_serialized_size { + // TODO: temporary mockup. + pragma opaque; + } } diff --git a/aptos-move/framework/move-stdlib/sources/bit_vector.move b/aptos-move/framework/move-stdlib/sources/bit_vector.move index 7bf3e22694444..9fc65809eccd2 100644 --- a/aptos-move/framework/move-stdlib/sources/bit_vector.move +++ b/aptos-move/framework/move-stdlib/sources/bit_vector.move @@ -29,8 +29,8 @@ module std::bit_vector { invariant len(bit_field) == counter; }; (counter < length)}) { - vector::push_back(&mut bit_field, false); - counter = counter + 1; + bit_field.push_back(false); + counter += 1; }; spec { assert counter == length; @@ -53,59 +53,57 @@ module std::bit_vector { aborts_if length >= MAX_SIZE with ELENGTH; } - /// Set the bit at `bit_index` in the `bitvector` regardless of its previous state. - public fun set(bitvector: &mut BitVector, bit_index: u64) { - assert!(bit_index < vector::length(&bitvector.bit_field), EINDEX); - let x = vector::borrow_mut(&mut bitvector.bit_field, bit_index); - *x = true; + /// Set the bit at `bit_index` in the `self` regardless of its previous state. 
+ public fun set(self: &mut BitVector, bit_index: u64) { + assert!(bit_index < self.bit_field.length(), EINDEX); + self.bit_field[bit_index] = true; } spec set { include SetAbortsIf; - ensures bitvector.bit_field[bit_index]; + ensures self.bit_field[bit_index]; } spec schema SetAbortsIf { - bitvector: BitVector; + self: BitVector; bit_index: u64; - aborts_if bit_index >= length(bitvector) with EINDEX; + aborts_if bit_index >= self.length() with EINDEX; } - /// Unset the bit at `bit_index` in the `bitvector` regardless of its previous state. - public fun unset(bitvector: &mut BitVector, bit_index: u64) { - assert!(bit_index < vector::length(&bitvector.bit_field), EINDEX); - let x = vector::borrow_mut(&mut bitvector.bit_field, bit_index); - *x = false; + /// Unset the bit at `bit_index` in the `self` regardless of its previous state. + public fun unset(self: &mut BitVector, bit_index: u64) { + assert!(bit_index < self.bit_field.length(), EINDEX); + self.bit_field[bit_index] = false; } spec unset { include UnsetAbortsIf; - ensures !bitvector.bit_field[bit_index]; + ensures !self.bit_field[bit_index]; } spec schema UnsetAbortsIf { - bitvector: BitVector; + self: BitVector; bit_index: u64; - aborts_if bit_index >= length(bitvector) with EINDEX; + aborts_if bit_index >= self.length() with EINDEX; } - /// Shift the `bitvector` left by `amount`. If `amount` is greater than the + /// Shift the `self` left by `amount`. If `amount` is greater than the /// bitvector's length the bitvector will be zeroed out. - public fun shift_left(bitvector: &mut BitVector, amount: u64) { - if (amount >= bitvector.length) { - vector::for_each_mut(&mut bitvector.bit_field, |elem| { + public fun shift_left(self: &mut BitVector, amount: u64) { + if (amount >= self.length) { + self.bit_field.for_each_mut(|elem| { *elem = false; }); } else { let i = amount; - while (i < bitvector.length) { - if (is_index_set(bitvector, i)) set(bitvector, i - amount) - else unset(bitvector, i - amount); - i = i + 1; + while (i < self.length) { + if (self.is_index_set(i)) self.set(i - amount) + else self.unset(i - amount); + i += 1; }; - i = bitvector.length - amount; + i = self.length - amount; - while (i < bitvector.length) { - unset(bitvector, i); - i = i + 1; + while (i < self.length) { + self.unset(i); + i += 1; }; } } @@ -114,62 +112,62 @@ module std::bit_vector { pragma verify = false; } - /// Return the value of the bit at `bit_index` in the `bitvector`. `true` + /// Return the value of the bit at `bit_index` in the `self`. 
`true` /// represents "1" and `false` represents a 0 - public fun is_index_set(bitvector: &BitVector, bit_index: u64): bool { - assert!(bit_index < vector::length(&bitvector.bit_field), EINDEX); - *vector::borrow(&bitvector.bit_field, bit_index) + public fun is_index_set(self: &BitVector, bit_index: u64): bool { + assert!(bit_index < self.bit_field.length(), EINDEX); + self.bit_field[bit_index] } spec is_index_set { include IsIndexSetAbortsIf; - ensures result == bitvector.bit_field[bit_index]; + ensures result == self.bit_field[bit_index]; } spec schema IsIndexSetAbortsIf { - bitvector: BitVector; + self: BitVector; bit_index: u64; - aborts_if bit_index >= length(bitvector) with EINDEX; + aborts_if bit_index >= self.length() with EINDEX; } - spec fun spec_is_index_set(bitvector: BitVector, bit_index: u64): bool { - if (bit_index >= length(bitvector)) { + spec fun spec_is_index_set(self: BitVector, bit_index: u64): bool { + if (bit_index >= self.length()) { false } else { - bitvector.bit_field[bit_index] + self.bit_field[bit_index] } } /// Return the length (number of usable bits) of this bitvector - public fun length(bitvector: &BitVector): u64 { - vector::length(&bitvector.bit_field) + public fun length(self: &BitVector): u64 { + self.bit_field.length() } /// Returns the length of the longest sequence of set bits starting at (and /// including) `start_index` in the `bitvector`. If there is no such /// sequence, then `0` is returned. - public fun longest_set_sequence_starting_at(bitvector: &BitVector, start_index: u64): u64 { - assert!(start_index < bitvector.length, EINDEX); + public fun longest_set_sequence_starting_at(self: &BitVector, start_index: u64): u64 { + assert!(start_index < self.length, EINDEX); let index = start_index; // Find the greatest index in the vector such that all indices less than it are set. 
while ({ spec { invariant index >= start_index; - invariant index == start_index || is_index_set(bitvector, index - 1); - invariant index == start_index || index - 1 < vector::length(bitvector.bit_field); - invariant forall j in start_index..index: is_index_set(bitvector, j); - invariant forall j in start_index..index: j < vector::length(bitvector.bit_field); + invariant index == start_index || self.is_index_set(index - 1); + invariant index == start_index || index - 1 < len(self.bit_field); + invariant forall j in start_index..index: self.is_index_set(j); + invariant forall j in start_index..index: j < len(self.bit_field); }; - index < bitvector.length + index < self.length }) { - if (!is_index_set(bitvector, index)) break; - index = index + 1; + if (!self.is_index_set(index)) break; + index += 1; }; index - start_index } - spec longest_set_sequence_starting_at(bitvector: &BitVector, start_index: u64): u64 { - aborts_if start_index >= bitvector.length; - ensures forall i in start_index..result: is_index_set(bitvector, i); + spec longest_set_sequence_starting_at(self: &BitVector, start_index: u64): u64 { + aborts_if start_index >= self.length; + ensures forall i in start_index..result: self.is_index_set(i); } #[test_only] @@ -178,21 +176,21 @@ module std::bit_vector { } #[verify_only] - public fun shift_left_for_verification_only(bitvector: &mut BitVector, amount: u64) { - if (amount >= bitvector.length) { - let len = vector::length(&bitvector.bit_field); + public fun shift_left_for_verification_only(self: &mut BitVector, amount: u64) { + if (amount >= self.length) { + let len = self.bit_field.length(); let i = 0; while ({ spec { - invariant len == bitvector.length; - invariant forall k in 0..i: !bitvector.bit_field[k]; - invariant forall k in i..bitvector.length: bitvector.bit_field[k] == old(bitvector).bit_field[k]; + invariant len == self.length; + invariant forall k in 0..i: !self.bit_field[k]; + invariant forall k in i..self.length: self.bit_field[k] == old(self).bit_field[k]; }; i < len }) { - let elem = vector::borrow_mut(&mut bitvector.bit_field, i); + let elem = self.bit_field.borrow_mut(i); *elem = false; - i = i + 1; + i += 1; }; } else { let i = amount; @@ -200,40 +198,40 @@ module std::bit_vector { while ({ spec { invariant i >= amount; - invariant bitvector.length == old(bitvector).length; - invariant forall j in amount..i: old(bitvector).bit_field[j] == bitvector.bit_field[j - amount]; - invariant forall j in (i-amount)..bitvector.length : old(bitvector).bit_field[j] == bitvector.bit_field[j]; - invariant forall k in 0..i-amount: bitvector.bit_field[k] == old(bitvector).bit_field[k + amount]; + invariant self.length == old(self).length; + invariant forall j in amount..i: old(self).bit_field[j] == self.bit_field[j - amount]; + invariant forall j in (i-amount)..self.length : old(self).bit_field[j] == self.bit_field[j]; + invariant forall k in 0..i-amount: self.bit_field[k] == old(self).bit_field[k + amount]; }; - i < bitvector.length + i < self.length }) { - if (is_index_set(bitvector, i)) set(bitvector, i - amount) - else unset(bitvector, i - amount); - i = i + 1; + if (self.is_index_set(i)) self.set(i - amount) + else self.unset(i - amount); + i += 1; }; - i = bitvector.length - amount; + i = self.length - amount; while ({ spec { - invariant forall j in bitvector.length - amount..i: !bitvector.bit_field[j]; - invariant forall k in 0..bitvector.length - amount: bitvector.bit_field[k] == old(bitvector).bit_field[k + amount]; - invariant i >= bitvector.length - amount; + 
invariant forall j in self.length - amount..i: !self.bit_field[j]; + invariant forall k in 0..self.length - amount: self.bit_field[k] == old(self).bit_field[k + amount]; + invariant i >= self.length - amount; }; - i < bitvector.length + i < self.length }) { - unset(bitvector, i); - i = i + 1; + self.unset(i); + i += 1; } } } spec shift_left_for_verification_only { aborts_if false; - ensures amount >= bitvector.length ==> (forall k in 0..bitvector.length: !bitvector.bit_field[k]); - ensures amount < bitvector.length ==> - (forall i in bitvector.length - amount..bitvector.length: !bitvector.bit_field[i]); - ensures amount < bitvector.length ==> - (forall i in 0..bitvector.length - amount: bitvector.bit_field[i] == old(bitvector).bit_field[i + amount]); + ensures amount >= self.length ==> (forall k in 0..self.length: !self.bit_field[k]); + ensures amount < self.length ==> + (forall i in self.length - amount..self.length: !self.bit_field[i]); + ensures amount < self.length ==> + (forall i in 0..self.length - amount: self.bit_field[i] == old(self).bit_field[i + amount]); } } diff --git a/aptos-move/framework/move-stdlib/sources/cmp.move b/aptos-move/framework/move-stdlib/sources/cmp.move new file mode 100644 index 0000000000000..af90ff7b275a0 --- /dev/null +++ b/aptos-move/framework/move-stdlib/sources/cmp.move @@ -0,0 +1,150 @@ +module std::cmp { + enum Ordering has copy, drop { + /// First value is less than the second value. + Less, + /// First value is equal to the second value. + Equal, + /// First value is greater than the second value. + Greater, + } + + /// Compares two values with the natural ordering: + /// - native types are compared identically to `<` and other operators + /// - complex types + /// - Structs and vectors - are compared lexicographically - first field/element is compared first, + /// and if equal we proceed to the next. + /// - enum's are compared first by their variant, and if equal - they are compared as structs are. + native public fun compare(first: &T, second: &T): Ordering; + + public fun is_eq(self: &Ordering): bool { + self is Ordering::Equal + } + + public fun is_ne(self: &Ordering): bool { + !(self is Ordering::Equal) + } + + public fun is_lt(self: &Ordering): bool { + self is Ordering::Less + } + + public fun is_le(self: &Ordering): bool { + !(self is Ordering::Greater) + } + + public fun is_gt(self: &Ordering): bool { + self is Ordering::Greater + } + + public fun is_ge(self: &Ordering): bool { + !(self is Ordering::Less) + } + + spec compare { + // TODO: temporary mockup. 
+ pragma opaque; + } + + #[test_only] + struct SomeStruct has drop { + field_1: u64, + field_2: u64, + } + + #[test_only] + enum SimpleEnum has drop { + V { field: u64 }, + } + + #[test_only] + enum SomeEnum has drop { + V1 { field_1: u64 }, + V2 { field_2: u64 }, + V3 { field_3: SomeStruct }, + V4 { field_4: vector }, + V5 { field_5: SimpleEnum }, + } + + #[test] + fun test_compare_numbers() { + assert!(compare(&1, &5).is_ne(), 0); + assert!(!compare(&1, &5).is_eq(), 0); + assert!(compare(&1, &5).is_lt(), 1); + assert!(compare(&1, &5).is_le(), 2); + assert!(compare(&5, &5).is_eq(), 3); + assert!(!compare(&5, &5).is_ne(), 3); + assert!(!compare(&5, &5).is_lt(), 4); + assert!(compare(&5, &5).is_le(), 5); + assert!(!compare(&7, &5).is_eq(), 6); + assert!(compare(&7, &5).is_ne(), 6); + assert!(!compare(&7, &5).is_lt(), 7); + assert!(!compare(&7, &5).is_le(), 8); + + assert!(!compare(&1, &5).is_eq(), 0); + assert!(compare(&1, &5).is_ne(), 0); + assert!(compare(&1, &5).is_lt(), 1); + assert!(compare(&1, &5).is_le(), 2); + assert!(!compare(&1, &5).is_gt(), 1); + assert!(!compare(&1, &5).is_ge(), 1); + assert!(compare(&5, &5).is_eq(), 3); + assert!(!compare(&5, &5).is_ne(), 3); + assert!(!compare(&5, &5).is_lt(), 4); + assert!(compare(&5, &5).is_le(), 5); + assert!(!compare(&5, &5).is_gt(), 5); + assert!(compare(&5, &5).is_ge(), 5); + assert!(!compare(&7, &5).is_eq(), 6); + assert!(compare(&7, &5).is_ne(), 6); + assert!(!compare(&7, &5).is_lt(), 7); + assert!(!compare(&7, &5).is_le(), 8); + assert!(compare(&7, &5).is_gt(), 7); + assert!(compare(&7, &5).is_ge(), 8); + } + + #[test] + fun test_compare_vectors() { + let empty = vector[]; // here for typing, for the second line + assert!(compare(&empty, &vector[1] ) is Ordering::Less, 0); + assert!(compare(&empty, &vector[] ) is Ordering::Equal, 1); + assert!(compare(&vector[1], &vector[] ) is Ordering::Greater, 2); + assert!(compare(&vector[1, 2], &vector[1, 2] ) is Ordering::Equal, 3); + assert!(compare(&vector[1, 2, 3], &vector[5] ) is Ordering::Less, 4); + assert!(compare(&vector[1, 2, 3], &vector[5, 6, 7]) is Ordering::Less, 5); + assert!(compare(&vector[1, 2, 3], &vector[1, 2, 7]) is Ordering::Less, 6); + } + + #[test] + fun test_compare_structs() { + assert!(compare(&SomeStruct { field_1: 1, field_2: 2}, &SomeStruct { field_1: 1, field_2: 2}) is Ordering::Equal, 0); + assert!(compare(&SomeStruct { field_1: 1, field_2: 2}, &SomeStruct { field_1: 1, field_2: 3}) is Ordering::Less, 1); + assert!(compare(&SomeStruct { field_1: 1, field_2: 2}, &SomeStruct { field_1: 1, field_2: 1}) is Ordering::Greater, 2); + assert!(compare(&SomeStruct { field_1: 2, field_2: 1}, &SomeStruct { field_1: 1, field_2: 2}) is Ordering::Greater, 3); + } + + #[test] + fun test_compare_vector_of_structs() { + assert!(compare(&vector[SomeStruct { field_1: 1, field_2: 2}, SomeStruct { field_1: 3, field_2: 4}], &vector[SomeStruct { field_1: 1, field_2: 3}]) is Ordering::Less, 0); + assert!(compare(&vector[SomeStruct { field_1: 1, field_2: 2}, SomeStruct { field_1: 3, field_2: 4}], &vector[SomeStruct { field_1: 1, field_2: 2}, SomeStruct { field_1: 1, field_2: 3}]) is Ordering::Greater, 1); + } + + #[test] + fun test_compare_enums() { + assert!(compare(&SomeEnum::V1 { field_1: 6}, &SomeEnum::V1 { field_1: 6}) is Ordering::Equal, 0); + assert!(compare(&SomeEnum::V1 { field_1: 6}, &SomeEnum::V2 { field_2: 1}) is Ordering::Less, 1); + assert!(compare(&SomeEnum::V1 { field_1: 6}, &SomeEnum::V2 { field_2: 8}) is Ordering::Less, 2); + assert!(compare(&SomeEnum::V1 { field_1: 6}, 
&SomeEnum::V1 { field_1: 5}) is Ordering::Greater, 3); + + assert!(compare(&SomeEnum::V3 { field_3: SomeStruct { field_1: 1, field_2: 2}}, &SomeEnum::V3 { field_3: SomeStruct { field_1: 1, field_2: 2}}) is Ordering::Equal, 4); + assert!(compare(&SomeEnum::V3 { field_3: SomeStruct { field_1: 1, field_2: 2}}, &SomeEnum::V3 { field_3: SomeStruct { field_1: 1, field_2: 3}}) is Ordering::Less, 5); + assert!(compare(&SomeEnum::V3 { field_3: SomeStruct { field_1: 1, field_2: 2}}, &SomeEnum::V3 { field_3: SomeStruct { field_1: 1, field_2: 1}}) is Ordering::Greater, 6); + assert!(compare(&SomeEnum::V3 { field_3: SomeStruct { field_1: 1, field_2: 2}}, &SomeEnum::V3 { field_3: SomeStruct { field_1: 1, field_2: 1}}) is Ordering::Greater, 7); + + assert!(compare(&SomeEnum::V4 { field_4: vector[1, 2]}, &SomeEnum::V4 { field_4: vector[1, 2]}) is Ordering::Equal, 8); + assert!(compare(&SomeEnum::V4 { field_4: vector[1, 2, 3]}, &SomeEnum::V4 { field_4: vector[5]}) is Ordering::Less, 9); + assert!(compare(&SomeEnum::V4 { field_4: vector[1, 2, 3]}, &SomeEnum::V4 { field_4: vector[5, 6, 7]}) is Ordering::Less, 10); + assert!(compare(&SomeEnum::V4 { field_4: vector[1, 2, 3]}, &SomeEnum::V4 { field_4: vector[1, 2, 7]}) is Ordering::Less, 11); + + assert!(compare(&SomeEnum::V5 { field_5: SimpleEnum::V { field: 3}}, &SomeEnum::V5 { field_5: SimpleEnum::V { field: 3}}) is Ordering::Equal, 12); + assert!(compare(&SomeEnum::V5 { field_5: SimpleEnum::V { field: 5}}, &SomeEnum::V5 { field_5: SimpleEnum::V { field: 3}}) is Ordering::Greater, 13); + assert!(compare(&SomeEnum::V5 { field_5: SimpleEnum::V { field: 3}}, &SomeEnum::V5 { field_5: SimpleEnum::V { field: 5}}) is Ordering::Less, 14); + } +} diff --git a/aptos-move/framework/move-stdlib/sources/configs/features.move b/aptos-move/framework/move-stdlib/sources/configs/features.move index c270ebd1cf7f0..e7da50fa3c59f 100644 --- a/aptos-move/framework/move-stdlib/sources/configs/features.move +++ b/aptos-move/framework/move-stdlib/sources/configs/features.move @@ -85,14 +85,18 @@ module std::features { is_enabled(VM_BINARY_FORMAT_V6) } - /// Whether gas fees are collected and distributed to the block proposers. + #[deprecated] + /// Deprecated feature /// Lifetime: transient const COLLECT_AND_DISTRIBUTE_GAS_FEES: u64 = 6; + #[deprecated] + /// Deprecated feature public fun get_collect_and_distribute_gas_fees_feature(): u64 { COLLECT_AND_DISTRIBUTE_GAS_FEES } - public fun collect_and_distribute_gas_fees(): bool acquires Features { - is_enabled(COLLECT_AND_DISTRIBUTE_GAS_FEES) + #[deprecated] + public fun collect_and_distribute_gas_fees(): bool { + false } /// Whether the new `aptos_stdlib::multi_ed25519::public_key_validate_internal_v2()` native is enabled. @@ -489,7 +493,7 @@ module std::features { public fun get_coin_to_fungible_asset_migration_feature(): u64 { COIN_TO_FUNGIBLE_ASSET_MIGRATION } public fun coin_to_fungible_asset_migration_feature_enabled(): bool acquires Features { - is_enabled(COIN_TO_FUNGIBLE_ASSET_MIGRATION) + is_enabled(COIN_TO_FUNGIBLE_ASSET_MIGRATION) } const PRIMARY_APT_FUNGIBLE_STORE_AT_USER_ADDRESS: u64 = 61; @@ -582,46 +586,150 @@ module std::features { is_enabled(ABORT_IF_MULTISIG_PAYLOAD_MISMATCH) } - /// Whether the Atomic bridge is available + /// Whether the simulation enhancement is enabled. This enables the simulation without an authentication check, + /// the sponsored transaction simulation when the fee payer is set to 0x0, and the multisig transaction + /// simulation consistnet with the execution. 
+ /// + /// Lifetime: transient + const TRANSACTION_SIMULATION_ENHANCEMENT: u64 = 78; + + public fun get_transaction_simulation_enhancement_feature(): u64 { TRANSACTION_SIMULATION_ENHANCEMENT } + + public fun transaction_simulation_enhancement_enabled(): bool acquires Features { + is_enabled(TRANSACTION_SIMULATION_ENHANCEMENT) + } + + const COLLECTION_OWNER: u64 = 79; + + public fun get_collection_owner_feature(): u64 { COLLECTION_OWNER } + + public fun is_collection_owner_enabled(): bool acquires Features { + is_enabled(COLLECTION_OWNER) + } + + const NATIVE_MEMORY_OPERATIONS: u64 = 80; + + public fun get_native_memory_operations_feature(): u64 { NATIVE_MEMORY_OPERATIONS } + + public fun is_native_memory_operations_enabled(): bool acquires Features { + is_enabled(NATIVE_MEMORY_OPERATIONS) + } + + const PERMISSIONED_SIGNER: u64 = 84; + + public fun get_permissioned_signer_feature(): u64 { PERMISSIONED_SIGNER } + + public fun is_permissioned_signer_enabled(): bool acquires Features { + is_enabled(PERMISSIONED_SIGNER) + } + + /// Whether the account abstraction is enabled. + /// /// Lifetime: transient - const ATOMIC_BRIDGE: u64 = 71; + const ACCOUNT_ABSTRACTION: u64 = 85; - public fun get_atomic_bridge_feature(): u64 { ATOMIC_BRIDGE } + public fun get_account_abstraction_feature(): u64 { ACCOUNT_ABSTRACTION } - public fun abort_atomic_bridge_enabled(): bool acquires Features { - is_enabled(ATOMIC_BRIDGE) + public fun is_account_abstraction_enabled(): bool acquires Features { + is_enabled(ACCOUNT_ABSTRACTION) } + /// Whether bytecode version v8 is enabled. + /// Lifetime: transient + /// + /// We do not expect use from Move, so for now only for documentation purposes here + const VM_BINARY_FORMAT_V8: u64 = 86; - /// Whether the Atomic bridge is available + /// Whether the batch Bulletproofs native functions are available. This is needed because of the introduction of a new native function. /// Lifetime: transient - const NATIVE_BRIDGE: u64 = 72; + const BULLETPROOFS_BATCH_NATIVES: u64 = 87; - public fun get_native_bridge_feature(): u64 { NATIVE_BRIDGE } + public fun get_bulletproofs_batch_feature(): u64 { BULLETPROOFS_BATCH_NATIVES } - public fun abort_native_bridge_enabled(): bool acquires Features { - is_enabled(NATIVE_BRIDGE) + public fun bulletproofs_batch_enabled(): bool acquires Features { + is_enabled(BULLETPROOFS_BATCH_NATIVES) } - /// Whether the Governed Gas Pool is used to capture gas fees + /// Whether the account abstraction is enabled. /// - /// Lifetime: permanent - const GOVERNED_GAS_POOL: u64 = 73; + /// Lifetime: transient + const DERIVABLE_ACCOUNT_ABSTRACTION: u64 = 88; + + public fun is_derivable_account_abstraction_enabled(): bool acquires Features { + is_enabled(DERIVABLE_ACCOUNT_ABSTRACTION) + } - /// Whether the Governed Gas Pool is enabled. - public fun get_governed_gas_pool_feature(): u64 { GOVERNED_GAS_POOL } + #[deprecated] + public fun is_domain_account_abstraction_enabled(): bool { + false + } + + /// Whether function values are enabled. + /// Lifetime: transient + /// + /// We do not expect use from Move, so for now only for documentation purposes here + const ENABLE_FUNCTION_VALUES: u64 = 89; + + /// Whether new accounts default to the Fungible Asset store. 
+ /// Lifetime: transient + const NEW_ACCOUNTS_DEFAULT_TO_FA_STORE: u64 = 90; + + public fun get_new_accounts_default_to_fa_store_feature(): u64 { NEW_ACCOUNTS_DEFAULT_TO_FA_STORE } + + public fun new_accounts_default_to_fa_store_enabled(): bool acquires Features { + is_enabled(NEW_ACCOUNTS_DEFAULT_TO_FA_STORE) + } + + /// Lifetime: transient + const DEFAULT_ACCOUNT_RESOURCE: u64 = 91; + + public fun get_default_account_resource_feature(): u64 { DEFAULT_ACCOUNT_RESOURCE } + + public fun is_default_account_resource_enabled(): bool acquires Features { + is_enabled(DEFAULT_ACCOUNT_RESOURCE) + } + + /// If enabled, JWK consensus should run in per-key mode, where: + /// - The consensus is for key-level updates + /// (e.g., "issuer A key 1 should be deleted", "issuer B key 2 should be upserted"); + /// - transaction type `ValidatorTransaction::ObservedJWKUpdate` is reused; + /// - while a key-level update is mostly represented by a new type `KeyLevelUpdate` locally, + /// For simplicity, it is represented by type `ProviderJWKs` (used to represent issuer-level update) + /// in JWK Consensus messages, in validator transactions, and in Move. + const JWK_CONSENSUS_PER_KEY_MODE: u64 = 92; - public fun governed_gas_pool_enabled(): bool acquires Features { - is_enabled(GOVERNED_GAS_POOL) + public fun get_jwk_consensus_per_key_mode_feature(): u64 { JWK_CONSENSUS_PER_KEY_MODE } + + public fun is_jwk_consensus_per_key_mode_enabled(): bool acquires Features { + is_enabled(JWK_CONSENSUS_PER_KEY_MODE) } + /// Whether orderless transactions are enabled. /// Lifetime: transient - const DECOMMISSION_CORE_RESOURCES: u64 = 222; + const ORDERLESS_TRANSACTIONS: u64 = 94; + + public fun get_orderless_transactions_feature(): u64 { ORDERLESS_TRANSACTIONS } + + public fun orderless_transactions_enabled(): bool acquires Features { + is_enabled(ORDERLESS_TRANSACTIONS) + } + + /// Whether to calculate the transaction fee for distribution. + const CALCULATE_TRANSACTION_FEE_FOR_DISTRIBUTION: u64 = 96; - public fun get_decommission_core_resources_feature(): u64 { DECOMMISSION_CORE_RESOURCES } + public fun get_calculate_transaction_fee_for_distribution_feature(): u64 { CALCULATE_TRANSACTION_FEE_FOR_DISTRIBUTION } - public fun get_decommission_core_resources_enabled(): bool acquires Features { - is_enabled(DECOMMISSION_CORE_RESOURCES) + public fun is_calculate_transaction_fee_for_distribution_enabled(): bool acquires Features { + is_enabled(CALCULATE_TRANSACTION_FEE_FOR_DISTRIBUTION) + } + + /// Whether to distribute transaction fee to validators. + const DISTRIBUTE_TRANSACTION_FEE: u64 = 97; + + public fun get_distribute_transaction_fee_feature(): u64 { DISTRIBUTE_TRANSACTION_FEE } + + public fun is_distribute_transaction_fee_enabled(): bool acquires Features { + is_enabled(DISTRIBUTE_TRANSACTION_FEE) } // ============================================================================================ @@ -645,11 +753,9 @@ module std::features { /// /// Genesis/tests should use `change_feature_flags_internal()` for feature vec initialization. /// - /// This can be used on testnet prior to successful DKG. - /// /// Governance proposals should use `change_feature_flags_for_next_epoch()` to enable/disable features. 
- public fun change_feature_flags(framework: &signer, enable: vector, disable: vector) acquires Features { - change_feature_flags_internal(framework, enable, disable) + public fun change_feature_flags(_framework: &signer, _enable: vector, _disable: vector) { + abort (error::invalid_state(EAPI_DISABLED)) } /// Update feature flags directly. Only used in genesis/tests. @@ -658,11 +764,11 @@ module std::features { if (!exists(@std)) { move_to(framework, Features { features: vector[] }) }; - let features = &mut borrow_global_mut(@std).features; - vector::for_each_ref(&enable, |feature| { + let features = &mut Features[@std].features; + enable.for_each_ref(|feature| { set(features, *feature, true); }); - vector::for_each_ref(&disable, |feature| { + disable.for_each_ref(|feature| { set(features, *feature, false); }); } @@ -682,7 +788,7 @@ module std::features { features } else if (exists(@std)) { // Otherwise, use the currently effective feature flag vec as the baseline, if it exists. - borrow_global(@std).features + Features[@std].features } else { // Otherwise, use an empty feature vec. vector[] @@ -702,7 +808,7 @@ module std::features { if (exists(@std)) { let PendingFeatures { features } = move_from(@std); if (exists(@std)) { - borrow_global_mut(@std).features = features; + Features[@std].features = features; } else { move_to(framework, Features { features }) } @@ -713,35 +819,35 @@ module std::features { /// Check whether the feature is enabled. public fun is_enabled(feature: u64): bool acquires Features { exists(@std) && - contains(&borrow_global(@std).features, feature) + contains(&Features[@std].features, feature) } /// Helper to include or exclude a feature flag. fun set(features: &mut vector, feature: u64, include: bool) { let byte_index = feature / 8; let bit_mask = 1 << ((feature % 8) as u8); - while (vector::length(features) <= byte_index) { - vector::push_back(features, 0) + while (features.length() <= byte_index) { + features.push_back(0) }; - let entry = vector::borrow_mut(features, byte_index); + if (include) - *entry = *entry | bit_mask + features[byte_index] |= bit_mask else - *entry = *entry & (0xff ^ bit_mask) + features[byte_index] &= (0xff ^ bit_mask) } /// Helper to check whether a feature flag is enabled. fun contains(features: &vector, feature: u64): bool { let byte_index = feature / 8; let bit_mask = 1 << ((feature % 8) as u8); - byte_index < vector::length(features) && (*vector::borrow(features, byte_index) & bit_mask) != 0 + byte_index < features.length() && (features[byte_index] & bit_mask) != 0 } fun apply_diff(features: &mut vector, enable: vector, disable: vector) { - vector::for_each(enable, |feature| { + enable.for_each(|feature| { set(features, feature, true); }); - vector::for_each(disable, |feature| { + disable.for_each(|feature| { set(features, feature, false); }); } diff --git a/aptos-move/framework/move-stdlib/sources/configs/features.spec.move b/aptos-move/framework/move-stdlib/sources/configs/features.spec.move index 2823108154016..febab729294cf 100644 --- a/aptos-move/framework/move-stdlib/sources/configs/features.spec.move +++ b/aptos-move/framework/move-stdlib/sources/configs/features.spec.move @@ -1,15 +1,15 @@ /// Maintains feature flags. 
spec std::features { spec Features { - pragma bv=b"0"; + pragma bv = b"0"; } spec PendingFeatures { - pragma bv=b"0"; + pragma bv = b"0"; } spec set(features: &mut vector, feature: u64, include: bool) { - pragma bv=b"0"; + pragma bv = b"0"; aborts_if false; ensures feature / 8 < len(features); ensures include == spec_contains(features, feature); @@ -25,7 +25,7 @@ spec std::features { } spec contains(features: &vector, feature: u64): bool { - pragma bv=b"0"; + pragma bv = b"0"; aborts_if false; ensures result == spec_contains(features, feature); } @@ -40,7 +40,9 @@ spec std::features { } spec fun spec_contains(features: vector, feature: u64): bool { - ((int2bv((((1 as u8) << ((feature % (8 as u64)) as u64)) as u8)) as u8) & features[feature/8] as u8) > (0 as u8) + ((int2bv( + (((1 as u8) << ((feature % (8 as u64)) as u64)) as u8) + ) as u8) & features[feature / 8] as u8) > (0 as u8) && (feature / 8) < len(features) } @@ -66,10 +68,6 @@ spec std::features { spec_is_enabled(FEE_PAYER_ENABLED) } - spec fun spec_collect_and_distribute_gas_fees_enabled(): bool { - spec_is_enabled(COLLECT_AND_DISTRIBUTE_GAS_FEES) - } - spec fun spec_module_event_enabled(): bool { spec_is_enabled(MODULE_EVENT) } @@ -100,12 +98,30 @@ spec std::features { spec_is_enabled(ABORT_IF_MULTISIG_PAYLOAD_MISMATCH) } + spec fun spec_new_accounts_default_to_fa_apt_store_enabled(): bool { + spec_is_enabled(NEW_ACCOUNTS_DEFAULT_TO_FA_APT_STORE) + } + + spec fun spec_new_accounts_default_to_fa_store_enabled(): bool { + spec_is_enabled(NEW_ACCOUNTS_DEFAULT_TO_FA_STORE) + } + + spec fun spec_simulation_enhancement_enabled(): bool { + spec_is_enabled(TRANSACTION_SIMULATION_ENHANCEMENT) + } + spec abort_if_multisig_payload_mismatch_enabled { pragma opaque; aborts_if [abstract] false; ensures [abstract] result == spec_abort_if_multisig_payload_mismatch_enabled(); } + spec is_default_account_resource_enabled { + pragma opaque; + aborts_if [abstract] false; + ensures [abstract] result == spec_is_enabled(DEFAULT_ACCOUNT_RESOURCE); + } + spec on_new_epoch(framework: &signer) { requires @std == signer::address_of(framework); let features_pending = global(@std).features; diff --git a/aptos-move/framework/move-stdlib/sources/fixed_point32.move b/aptos-move/framework/move-stdlib/sources/fixed_point32.move index 96409a9ac4dfd..d5ae711d96940 100644 --- a/aptos-move/framework/move-stdlib/sources/fixed_point32.move +++ b/aptos-move/framework/move-stdlib/sources/fixed_point32.move @@ -40,7 +40,7 @@ module std::fixed_point32 { let product = unscaled_product >> 32; // Check whether the value is too large. assert!(product <= MAX_U64, EMULTIPLICATION); - (product as u64) + product as u64 } spec multiply_u64 { pragma opaque; @@ -144,13 +144,13 @@ module std::fixed_point32 { /// Accessor for the raw u64 value. Other less common operations, such as /// adding or subtracting FixedPoint32 values, can be done using the raw /// values directly. - public fun get_raw_value(num: FixedPoint32): u64 { - num.value + public fun get_raw_value(self: FixedPoint32): u64 { + self.value } /// Returns true if the ratio is zero. - public fun is_zero(num: FixedPoint32): bool { - num.value == 0 + public fun is_zero(self: FixedPoint32): bool { + self.value == 0 } /// Returns the smaller of the two FixedPoint32 numbers. @@ -216,27 +216,27 @@ module std::fixed_point32 { } /// Returns the largest integer less than or equal to a given number. 
- public fun floor(num: FixedPoint32): u64 { - num.value >> 32 + public fun floor(self: FixedPoint32): u64 { + self.value >> 32 } spec floor { pragma opaque; aborts_if false; - ensures result == spec_floor(num); + ensures result == spec_floor(self); } - spec fun spec_floor(val: FixedPoint32): u64 { - let fractional = val.value % (1 << 32); + spec fun spec_floor(self: FixedPoint32): u64 { + let fractional = self.value % (1 << 32); if (fractional == 0) { - val.value >> 32 + self.value >> 32 } else { - (val.value - fractional) >> 32 + (self.value - fractional) >> 32 } } /// Rounds up the given FixedPoint32 to the next largest integer. - public fun ceil(num: FixedPoint32): u64 { - let floored_num = floor(num) << 32; - if (num.value == floored_num) { + public fun ceil(self: FixedPoint32): u64 { + let floored_num = self.floor() << 32; + if (self.value == floored_num) { return floored_num >> 32 }; let val = ((floored_num as u128) + (1 << 32)); @@ -246,42 +246,42 @@ module std::fixed_point32 { pragma verify_duration_estimate = 120; pragma opaque; aborts_if false; - ensures result == spec_ceil(num); + ensures result == spec_ceil(self); } - spec fun spec_ceil(val: FixedPoint32): u64 { - let fractional = val.value % (1 << 32); + spec fun spec_ceil(self: FixedPoint32): u64 { + let fractional = self.value % (1 << 32); let one = 1 << 32; if (fractional == 0) { - val.value >> 32 + self.value >> 32 } else { - (val.value - fractional + one) >> 32 + (self.value - fractional + one) >> 32 } } /// Returns the value of a FixedPoint32 to the nearest integer. - public fun round(num: FixedPoint32): u64 { - let floored_num = floor(num) << 32; + public fun round(self: FixedPoint32): u64 { + let floored_num = self.floor() << 32; let boundary = floored_num + ((1 << 32) / 2); - if (num.value < boundary) { + if (self.value < boundary) { floored_num >> 32 } else { - ceil(num) + self.ceil() } } spec round { pragma verify_duration_estimate = 120; pragma opaque; aborts_if false; - ensures result == spec_round(num); + ensures result == spec_round(self); } - spec fun spec_round(val: FixedPoint32): u64 { - let fractional = val.value % (1 << 32); + spec fun spec_round(self: FixedPoint32): u64 { + let fractional = self.value % (1 << 32); let boundary = (1 << 32) / 2; let one = 1 << 32; if (fractional < boundary) { - (val.value - fractional) >> 32 + (self.value - fractional) >> 32 } else { - (val.value - fractional + one) >> 32 + (self.value - fractional + one) >> 32 } } diff --git a/aptos-move/framework/move-stdlib/sources/mem.move b/aptos-move/framework/move-stdlib/sources/mem.move new file mode 100644 index 0000000000000..a44e1feceeff1 --- /dev/null +++ b/aptos-move/framework/move-stdlib/sources/mem.move @@ -0,0 +1,35 @@ +/// Module with methods for safe memory manipulation. +module std::mem { + // TODO - functions here are `friend` here for one release, + // and to be changed to `public` one release later. + friend std::vector; + #[test_only] + friend std::mem_tests; + + /// Swap contents of two passed mutable references. + /// + /// Move prevents from having two mutable references to the same value, + /// so `left` and `right` references are always distinct. + native friend fun swap(left: &mut T, right: &mut T); + + /// Replace the value reference points to with the given new value, + /// and return the value it had before. 
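The intended behaviour of the two `std::mem` helpers can be pictured with a small sketch. This is illustrative only: as the TODO above notes, `swap` and `replace` are `friend`-visible in this release, so the call below assumes a later release (or a friend module such as `std::vector`) where it is actually callable; the module name is hypothetical.

```move
#[test_only]
module 0x42::mem_sketch {
    use std::mem;

    #[test]
    fun replace_returns_previous_value() {
        let x = 1u64;
        // Hypothetical call: only legal once mem::replace is public, or from a friend module.
        let old = mem::replace(&mut x, 5);
        assert!(old == 1, 0);
        assert!(x == 5, 1);
    }
}
```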
+ friend fun replace(ref: &mut T, new: T): T { + swap(ref, &mut new); + new + } + + spec swap(left: &mut T, right: &mut T) { + pragma opaque; + aborts_if false; + ensures right == old(left); + ensures left == old(right); + } + + spec replace(ref: &mut T, new: T): T { + pragma opaque; + aborts_if false; + ensures result == old(ref); + ensures ref == new; + } +} diff --git a/aptos-move/framework/move-stdlib/sources/option.move b/aptos-move/framework/move-stdlib/sources/option.move index 1793abfe9bc20..a5a984a096def 100644 --- a/aptos-move/framework/move-stdlib/sources/option.move +++ b/aptos-move/framework/move-stdlib/sources/option.move @@ -32,7 +32,7 @@ module std::option { ensures result == spec_none(); } spec fun spec_none(): Option { - Option{ vec: vec() } + Option{ vec: vector[] } } /// Return an `Option` containing `e` @@ -49,296 +49,296 @@ module std::option { } public fun from_vec(vec: vector): Option { - assert!(vector::length(&vec) <= 1, EOPTION_VEC_TOO_LONG); + assert!(vec.length() <= 1, EOPTION_VEC_TOO_LONG); Option { vec } } spec from_vec { - aborts_if vector::length(vec) > 1; + aborts_if vec.length() > 1; } - /// Return true if `t` does not hold a value - public fun is_none(t: &Option): bool { - vector::is_empty(&t.vec) + /// Return true if `self` does not hold a value + public fun is_none(self: &Option): bool { + self.vec.is_empty() } spec is_none { pragma opaque; aborts_if false; - ensures result == spec_is_none(t); + ensures result == spec_is_none(self); } - spec fun spec_is_none(t: Option): bool { - vector::is_empty(t.vec) + spec fun spec_is_none(self: Option): bool { + self.vec.is_empty() } - /// Return true if `t` holds a value - public fun is_some(t: &Option): bool { - !vector::is_empty(&t.vec) + /// Return true if `self` holds a value + public fun is_some(self: &Option): bool { + !self.vec.is_empty() } spec is_some { pragma opaque; aborts_if false; - ensures result == spec_is_some(t); + ensures result == spec_is_some(self); } - spec fun spec_is_some(t: Option): bool { - !vector::is_empty(t.vec) + spec fun spec_is_some(self: Option): bool { + !self.vec.is_empty() } - /// Return true if the value in `t` is equal to `e_ref` - /// Always returns `false` if `t` does not hold a value - public fun contains(t: &Option, e_ref: &Element): bool { - vector::contains(&t.vec, e_ref) + /// Return true if the value in `self` is equal to `e_ref` + /// Always returns `false` if `self` does not hold a value + public fun contains(self: &Option, e_ref: &Element): bool { + self.vec.contains(e_ref) } spec contains { pragma opaque; aborts_if false; - ensures result == spec_contains(t, e_ref); + ensures result == spec_contains(self, e_ref); } - spec fun spec_contains(t: Option, e: Element): bool { - is_some(t) && borrow(t) == e + spec fun spec_contains(self: Option, e: Element): bool { + self.is_some() && self.borrow() == e } - /// Return an immutable reference to the value inside `t` - /// Aborts if `t` does not hold a value - public fun borrow(t: &Option): &Element { - assert!(is_some(t), EOPTION_NOT_SET); - vector::borrow(&t.vec, 0) + /// Return an immutable reference to the value inside `self` + /// Aborts if `self` does not hold a value + public fun borrow(self: &Option): &Element { + assert!(self.is_some(), EOPTION_NOT_SET); + &self.vec[0] } spec borrow { pragma opaque; include AbortsIfNone; - ensures result == spec_borrow(t); + ensures result == spec_borrow(self); } - spec fun spec_borrow(t: Option): Element { - t.vec[0] + spec fun spec_borrow(self: Option): Element { + self.vec[0] } - 
/// Return a reference to the value inside `t` if it holds one - /// Return `default_ref` if `t` does not hold a value - public fun borrow_with_default(t: &Option, default_ref: &Element): &Element { - let vec_ref = &t.vec; - if (vector::is_empty(vec_ref)) default_ref - else vector::borrow(vec_ref, 0) + /// Return a reference to the value inside `self` if it holds one + /// Return `default_ref` if `self` does not hold a value + public fun borrow_with_default(self: &Option, default_ref: &Element): &Element { + let vec_ref = &self.vec; + if (vec_ref.is_empty()) default_ref + else &vec_ref[0] } spec borrow_with_default { pragma opaque; aborts_if false; - ensures result == (if (spec_is_some(t)) spec_borrow(t) else default_ref); + ensures result == (if (spec_is_some(self)) spec_borrow(self) else default_ref); } - /// Return the value inside `t` if it holds one - /// Return `default` if `t` does not hold a value + /// Return the value inside `self` if it holds one + /// Return `default` if `self` does not hold a value public fun get_with_default( - t: &Option, + self: &Option, default: Element, ): Element { - let vec_ref = &t.vec; - if (vector::is_empty(vec_ref)) default - else *vector::borrow(vec_ref, 0) + let vec_ref = &self.vec; + if (vec_ref.is_empty()) default + else vec_ref[0] } spec get_with_default { pragma opaque; aborts_if false; - ensures result == (if (spec_is_some(t)) spec_borrow(t) else default); + ensures result == (if (spec_is_some(self)) spec_borrow(self) else default); } - /// Convert the none option `t` to a some option by adding `e`. - /// Aborts if `t` already holds a value - public fun fill(t: &mut Option, e: Element) { - let vec_ref = &mut t.vec; - if (vector::is_empty(vec_ref)) vector::push_back(vec_ref, e) + /// Convert the none option `self` to a some option by adding `e`. 
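The `self`-based signatures above enable receiver-style call syntax at use sites. A small usage sketch (hypothetical test module, `u64` payload assumed):

```move
#[test_only]
module 0x42::option_usage_sketch {
    use std::option;

    #[test]
    fun receiver_style_calls() {
        let o = option::some(7u64);
        assert!(o.is_some(), 0);
        assert!(*o.borrow() == 7, 1);
        assert!(o.get_with_default(0) == 7, 2);
        // Falls back to the default when the option is empty.
        let empty = option::none<u64>();
        assert!(empty.get_with_default(42) == 42, 3);
    }
}
```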
+ /// Aborts if `self` already holds a value + public fun fill(self: &mut Option, e: Element) { + let vec_ref = &mut self.vec; + if (vec_ref.is_empty()) vec_ref.push_back(e) else abort EOPTION_IS_SET } spec fill { pragma opaque; - aborts_if spec_is_some(t) with EOPTION_IS_SET; - ensures spec_is_some(t); - ensures spec_borrow(t) == e; + aborts_if spec_is_some(self) with EOPTION_IS_SET; + ensures spec_is_some(self); + ensures spec_borrow(self) == e; } - /// Convert a `some` option to a `none` by removing and returning the value stored inside `t` - /// Aborts if `t` does not hold a value - public fun extract(t: &mut Option): Element { - assert!(is_some(t), EOPTION_NOT_SET); - vector::pop_back(&mut t.vec) + /// Convert a `some` option to a `none` by removing and returning the value stored inside `self` + /// Aborts if `self` does not hold a value + public fun extract(self: &mut Option): Element { + assert!(self.is_some(), EOPTION_NOT_SET); + self.vec.pop_back() } spec extract { pragma opaque; include AbortsIfNone; - ensures result == spec_borrow(old(t)); - ensures spec_is_none(t); + ensures result == spec_borrow(old(self)); + ensures spec_is_none(self); } - /// Return a mutable reference to the value inside `t` - /// Aborts if `t` does not hold a value - public fun borrow_mut(t: &mut Option): &mut Element { - assert!(is_some(t), EOPTION_NOT_SET); - vector::borrow_mut(&mut t.vec, 0) + /// Return a mutable reference to the value inside `self` + /// Aborts if `self` does not hold a value + public fun borrow_mut(self: &mut Option): &mut Element { + assert!(self.is_some(), EOPTION_NOT_SET); + self.vec.borrow_mut(0) } spec borrow_mut { include AbortsIfNone; - ensures result == spec_borrow(t); - ensures t == old(t); + ensures result == spec_borrow(self); + ensures self == old(self); } - /// Swap the old value inside `t` with `e` and return the old value - /// Aborts if `t` does not hold a value - public fun swap(t: &mut Option, e: Element): Element { - assert!(is_some(t), EOPTION_NOT_SET); - let vec_ref = &mut t.vec; - let old_value = vector::pop_back(vec_ref); - vector::push_back(vec_ref, e); + /// Swap the old value inside `self` with `e` and return the old value + /// Aborts if `self` does not hold a value + public fun swap(self: &mut Option, e: Element): Element { + assert!(self.is_some(), EOPTION_NOT_SET); + let vec_ref = &mut self.vec; + let old_value = vec_ref.pop_back(); + vec_ref.push_back(e); old_value } spec swap { pragma opaque; include AbortsIfNone; - ensures result == spec_borrow(old(t)); - ensures spec_is_some(t); - ensures spec_borrow(t) == e; + ensures result == spec_borrow(old(self)); + ensures spec_is_some(self); + ensures spec_borrow(self) == e; } - /// Swap the old value inside `t` with `e` and return the old value; + /// Swap the old value inside `self` with `e` and return the old value; /// or if there is no old value, fill it with `e`. - /// Different from swap(), swap_or_fill() allows for `t` not holding a value. - public fun swap_or_fill(t: &mut Option, e: Element): Option { - let vec_ref = &mut t.vec; - let old_value = if (vector::is_empty(vec_ref)) none() - else some(vector::pop_back(vec_ref)); - vector::push_back(vec_ref, e); + /// Different from swap(), swap_or_fill() allows for `self` not holding a value. 
+ public fun swap_or_fill(self: &mut Option, e: Element): Option { + let vec_ref = &mut self.vec; + let old_value = if (vec_ref.is_empty()) none() + else some(vec_ref.pop_back()); + vec_ref.push_back(e); old_value } spec swap_or_fill { pragma opaque; aborts_if false; - ensures result == old(t); - ensures spec_borrow(t) == e; + ensures result == old(self); + ensures spec_borrow(self) == e; } - /// Destroys `t.` If `t` holds a value, return it. Returns `default` otherwise - public fun destroy_with_default(t: Option, default: Element): Element { - let Option { vec } = t; - if (vector::is_empty(&mut vec)) default - else vector::pop_back(&mut vec) + /// Destroys `self.` If `self` holds a value, return it. Returns `default` otherwise + public fun destroy_with_default(self: Option, default: Element): Element { + let Option { vec } = self; + if (vec.is_empty()) default + else vec.pop_back() } spec destroy_with_default { pragma opaque; aborts_if false; - ensures result == (if (spec_is_some(t)) spec_borrow(t) else default); + ensures result == (if (spec_is_some(self)) spec_borrow(self) else default); } - /// Unpack `t` and return its contents - /// Aborts if `t` does not hold a value - public fun destroy_some(t: Option): Element { - assert!(is_some(&t), EOPTION_NOT_SET); - let Option { vec } = t; - let elem = vector::pop_back(&mut vec); - vector::destroy_empty(vec); + /// Unpack `self` and return its contents + /// Aborts if `self` does not hold a value + public fun destroy_some(self: Option): Element { + assert!(self.is_some(), EOPTION_NOT_SET); + let Option { vec } = self; + let elem = vec.pop_back(); + vec.destroy_empty(); elem } spec destroy_some { pragma opaque; include AbortsIfNone; - ensures result == spec_borrow(t); + ensures result == spec_borrow(self); } - /// Unpack `t` - /// Aborts if `t` holds a value - public fun destroy_none(t: Option) { - assert!(is_none(&t), EOPTION_IS_SET); - let Option { vec } = t; - vector::destroy_empty(vec) + /// Unpack `self` + /// Aborts if `self` holds a value + public fun destroy_none(self: Option) { + assert!(self.is_none(), EOPTION_IS_SET); + let Option { vec } = self; + vec.destroy_empty() } spec destroy_none { pragma opaque; - aborts_if spec_is_some(t) with EOPTION_IS_SET; + aborts_if spec_is_some(self) with EOPTION_IS_SET; } - /// Convert `t` into a vector of length 1 if it is `Some`, + /// Convert `self` into a vector of length 1 if it is `Some`, /// and an empty vector otherwise - public fun to_vec(t: Option): vector { - let Option { vec } = t; + public fun to_vec(self: Option): vector { + let Option { vec } = self; vec } spec to_vec { pragma opaque; aborts_if false; - ensures result == t.vec; + ensures result == self.vec; } /// Apply the function to the optional element, consuming it. Does nothing if no value present. - public inline fun for_each(o: Option, f: |Element|) { - if (is_some(&o)) { - f(destroy_some(o)) + public inline fun for_each(self: Option, f: |Element|) { + if (self.is_some()) { + f(self.destroy_some()) } else { - destroy_none(o) + self.destroy_none() } } /// Apply the function to the optional element reference. Does nothing if no value present. - public inline fun for_each_ref(o: &Option, f: |&Element|) { - if (is_some(o)) { - f(borrow(o)) + public inline fun for_each_ref(self: &Option, f: |&Element|) { + if (self.is_some()) { + f(self.borrow()) } } /// Apply the function to the optional element reference. Does nothing if no value present. 
- public inline fun for_each_mut(o: &mut Option, f: |&mut Element|) { - if (is_some(o)) { - f(borrow_mut(o)) + public inline fun for_each_mut(self: &mut Option, f: |&mut Element|) { + if (self.is_some()) { + f(self.borrow_mut()) } } /// Folds the function over the optional element. public inline fun fold( - o: Option, + self: Option, init: Accumulator, f: |Accumulator,Element|Accumulator ): Accumulator { - if (is_some(&o)) { - f(init, destroy_some(o)) + if (self.is_some()) { + f(init, self.destroy_some()) } else { - destroy_none(o); + self.destroy_none(); init } } /// Maps the content of an option. - public inline fun map(o: Option, f: |Element|OtherElement): Option { - if (is_some(&o)) { - some(f(destroy_some(o))) + public inline fun map(self: Option, f: |Element|OtherElement): Option { + if (self.is_some()) { + some(f(self.destroy_some())) } else { - destroy_none(o); + self.destroy_none(); none() } } /// Maps the content of an option without destroying the original option. public inline fun map_ref( - o: &Option, f: |&Element|OtherElement): Option { - if (is_some(o)) { - some(f(borrow(o))) + self: &Option, f: |&Element|OtherElement): Option { + if (self.is_some()) { + some(f(self.borrow())) } else { none() } } /// Filters the content of an option - public inline fun filter(o: Option, f: |&Element|bool): Option { - if (is_some(&o) && f(borrow(&o))) { - o + public inline fun filter(self: Option, f: |&Element|bool): Option { + if (self.is_some() && f(self.borrow())) { + self } else { none() } } /// Returns true if the option contains an element which satisfies predicate. - public inline fun any(o: &Option, p: |&Element|bool): bool { - is_some(o) && p(borrow(o)) + public inline fun any(self: &Option, p: |&Element|bool): bool { + self.is_some() && p(self.borrow()) } /// Utility function to destroy an option that is not droppable. - public inline fun destroy(o: Option, d: |Element|) { - let vec = to_vec(o); - vector::destroy(vec, |e| d(e)); + public inline fun destroy(self: Option, d: |Element|) { + let vec = self.to_vec(); + vec.destroy(|e| d(e)); } spec module {} // switch documentation context back to module level @@ -350,7 +350,7 @@ module std::option { /// # Helper Schema spec schema AbortsIfNone { - t: Option; - aborts_if spec_is_none(t) with EOPTION_NOT_SET; + self: Option; + aborts_if spec_is_none(self) with EOPTION_NOT_SET; } } diff --git a/aptos-move/framework/move-stdlib/sources/signer.move b/aptos-move/framework/move-stdlib/sources/signer.move index c2e3ab3f559f8..16d5c2cc8ddae 100644 --- a/aptos-move/framework/move-stdlib/sources/signer.move +++ b/aptos-move/framework/move-stdlib/sources/signer.move @@ -1,10 +1,24 @@ module std::signer { - /// Borrows the address of the signer - /// Conceptually, you can think of the `signer` as being a struct wrapper around an - /// address + /// signer is a built-in Move type that represents an address that has been verified by the VM. + /// + /// The VM runtime representation is equivalent to the following: /// ``` - /// struct signer has drop { addr: address } + /// enum signer has drop { + /// Master { account: address }, + /// Permissioned { account: address, permissions_address: address }, + /// } /// ``` + /// + /// For BCS serialization: + /// + /// ``` + /// struct signer has drop { + /// account: address, + /// } + /// ``` + /// ^ The discrepancy is needed to maintain backwards compatibility of signer serialization + /// semantics.
+ /// /// `borrow_address` borrows this inner field native public fun borrow_address(s: &signer): &address; diff --git a/aptos-move/framework/move-stdlib/sources/string.move b/aptos-move/framework/move-stdlib/sources/string.move index 6a2ca69d00ec7..5ce4313ef8e99 100644 --- a/aptos-move/framework/move-stdlib/sources/string.move +++ b/aptos-move/framework/move-stdlib/sources/string.move @@ -1,6 +1,5 @@ /// The `string` module defines the `String` type which represents UTF8 encoded strings. module std::string { - use std::vector; use std::option::{Self, Option}; /// An invalid UTF8 encoding. @@ -30,49 +29,49 @@ module std::string { } /// Returns a reference to the underlying byte vector. - public fun bytes(s: &String): &vector { - &s.bytes + public fun bytes(self: &String): &vector { + &self.bytes } /// Checks whether this string is empty. - public fun is_empty(s: &String): bool { - vector::is_empty(&s.bytes) + public fun is_empty(self: &String): bool { + self.bytes.is_empty() } /// Returns the length of this string, in bytes. - public fun length(s: &String): u64 { - vector::length(&s.bytes) + public fun length(self: &String): u64 { + self.bytes.length() } /// Appends a string. - public fun append(s: &mut String, r: String) { - vector::append(&mut s.bytes, r.bytes) + public fun append(self: &mut String, r: String) { + self.bytes.append(r.bytes) } /// Appends bytes which must be in valid utf8 format. - public fun append_utf8(s: &mut String, bytes: vector) { - append(s, utf8(bytes)) + public fun append_utf8(self: &mut String, bytes: vector) { + self.append(utf8(bytes)) } /// Insert the other string at the byte index in given string. The index must be at a valid utf8 char /// boundary. - public fun insert(s: &mut String, at: u64, o: String) { - let bytes = &s.bytes; - assert!(at <= vector::length(bytes) && internal_is_char_boundary(bytes, at), EINVALID_INDEX); - let l = length(s); - let front = sub_string(s, 0, at); - let end = sub_string(s, at, l); - append(&mut front, o); - append(&mut front, end); - *s = front; + public fun insert(self: &mut String, at: u64, o: String) { + let bytes = &self.bytes; + assert!(at <= bytes.length() && internal_is_char_boundary(bytes, at), EINVALID_INDEX); + let l = self.length(); + let front = self.sub_string(0, at); + let end = self.sub_string(at, l); + front.append(o); + front.append(end); + *self = front; } /// Returns a sub-string using the given byte indices, where `i` is the first byte position and `j` is the start /// of the first byte not included (or the length of the string). The indices must be at valid utf8 char boundaries, /// guaranteeing that the result is valid utf8. - public fun sub_string(s: &String, i: u64, j: u64): String { - let bytes = &s.bytes; - let l = vector::length(bytes); + public fun sub_string(self: &String, i: u64, j: u64): String { + let bytes = &self.bytes; + let l = bytes.length(); assert!( j <= l && i <= j && internal_is_char_boundary(bytes, i) && internal_is_char_boundary(bytes, j), EINVALID_INDEX @@ -81,8 +80,8 @@ module std::string { } /// Computes the index of the first occurrence of a string. Returns `length(s)` if no occurrence found. 
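Likewise for `String`, the `self` receivers above read naturally at call sites. A minimal sketch (hypothetical test module):

```move
#[test_only]
module 0x42::string_usage_sketch {
    use std::string;

    #[test]
    fun append_and_slice() {
        let s = string::utf8(b"Hello");
        s.append(string::utf8(b", world"));
        assert!(s.length() == 12, 0);
        // Byte indices 0..5 land on char boundaries, so this slice is valid UTF-8.
        assert!(s.sub_string(0, 5) == string::utf8(b"Hello"), 1);
    }
}
```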
- public fun index_of(s: &String, r: &String): u64 { - internal_index_of(&s.bytes, &r.bytes) + public fun index_of(self: &String, r: &String): u64 { + internal_index_of(&self.bytes, &r.bytes) } // Native API diff --git a/aptos-move/framework/move-stdlib/sources/vector.move b/aptos-move/framework/move-stdlib/sources/vector.move index 05368acf4edbf..94d8f7f8fae6e 100644 --- a/aptos-move/framework/move-stdlib/sources/vector.move +++ b/aptos-move/framework/move-stdlib/sources/vector.move @@ -9,6 +9,8 @@ /// Move functions here because many have loops, requiring loop invariants to prove, and /// the return on investment didn't seem worth it for these simple functions. module std::vector { + use std::mem; + /// The index into the vector is out of bounds const EINDEX_OUT_OF_BOUNDS: u64 = 0x20000; @@ -24,47 +26,69 @@ module std::vector { /// The range in `slice` is invalid. const EINVALID_SLICE_RANGE: u64 = 0x20004; + /// Whether to utilize native vector::move_range + /// Vector module cannot call features module, due to cyclic dependency, + /// so this is a constant. + const USE_MOVE_RANGE: bool = true; + #[bytecode_instruction] /// Create an empty vector. native public fun empty(): vector; #[bytecode_instruction] /// Return the length of the vector. - native public fun length(v: &vector): u64; + native public fun length(self: &vector): u64; #[bytecode_instruction] - /// Acquire an immutable reference to the `i`th element of the vector `v`. + /// Acquire an immutable reference to the `i`th element of the vector `self`. /// Aborts if `i` is out of bounds. - native public fun borrow(v: &vector, i: u64): ∈ + native public fun borrow(self: &vector, i: u64): ∈ #[bytecode_instruction] - /// Add element `e` to the end of the vector `v`. - native public fun push_back(v: &mut vector, e: Element); + /// Add element `e` to the end of the vector `self`. + native public fun push_back(self: &mut vector, e: Element); #[bytecode_instruction] - /// Return a mutable reference to the `i`th element in the vector `v`. + /// Return a mutable reference to the `i`th element in the vector `self`. /// Aborts if `i` is out of bounds. - native public fun borrow_mut(v: &mut vector, i: u64): &mut Element; + native public fun borrow_mut(self: &mut vector, i: u64): &mut Element; #[bytecode_instruction] - /// Pop an element from the end of vector `v`. - /// Aborts if `v` is empty. - native public fun pop_back(v: &mut vector): Element; + /// Pop an element from the end of vector `self`. + /// Aborts if `self` is empty. + native public fun pop_back(self: &mut vector): Element; #[bytecode_instruction] - /// Destroy the vector `v`. - /// Aborts if `v` is not empty. - native public fun destroy_empty(v: vector); + /// Destroy the vector `self`. + /// Aborts if `self` is not empty. + native public fun destroy_empty(self: vector); #[bytecode_instruction] - /// Swaps the elements at the `i`th and `j`th indices in the vector `v`. + /// Swaps the elements at the `i`th and `j`th indices in the vector `self`. /// Aborts if `i` or `j` is out of bounds. - native public fun swap(v: &mut vector, i: u64, j: u64); + native public fun swap(self: &mut vector, i: u64, j: u64); + + /// Moves range of elements `[removal_position, removal_position + length)` from vector `from`, + /// to vector `to`, inserting them starting at the `insert_position`. + /// In the `from` vector, elements after the selected range are moved left to fill the hole + /// (i.e. 
range is removed, while the order of the rest of the elements is kept) + /// In the `to` vector, elements after the `insert_position` are moved to the right to make + /// space for new elements (i.e. range is inserted, while the order of the rest of the + /// elements is kept). + /// Move prevents from having two mutable references to the same value, so `from` and `to` + /// vectors are always distinct. + native public fun move_range( + from: &mut vector, + removal_position: u64, + length: u64, + to: &mut vector, + insert_position: u64 + ); /// Return an vector of size one containing element `e`. public fun singleton(e: Element): vector { let v = empty(); - push_back(&mut v, e); + v.push_back(e); v } spec singleton { @@ -72,35 +96,42 @@ module std::vector { ensures result == vec(e); } - /// Reverses the order of the elements in the vector `v` in place. - public fun reverse(v: &mut vector) { - let len = length(v); - reverse_slice(v, 0, len); + /// Reverses the order of the elements in the vector `self` in place. + public fun reverse(self: &mut vector) { + let len = self.length(); + self.reverse_slice(0, len); } spec reverse { pragma intrinsic = true; } - /// Reverses the order of the elements [left, right) in the vector `v` in place. - public fun reverse_slice(v: &mut vector, left: u64, right: u64) { + /// Reverses the order of the elements [left, right) in the vector `self` in place. + public fun reverse_slice(self: &mut vector, left: u64, right: u64) { assert!(left <= right, EINVALID_RANGE); if (left == right) return; - right = right - 1; + right -= 1; while (left < right) { - swap(v, left, right); - left = left + 1; - right = right - 1; + self.swap(left, right); + left += 1; + right -= 1; } } spec reverse_slice { pragma intrinsic = true; } - /// Pushes all of the elements of the `other` vector into the `lhs` vector. - public fun append(lhs: &mut vector, other: vector) { - reverse(&mut other); - reverse_append(lhs, other); + /// Pushes all of the elements of the `other` vector into the `self` vector. + public fun append(self: &mut vector, other: vector) { + if (USE_MOVE_RANGE) { + let self_length = self.length(); + let other_length = other.length(); + move_range(&mut other, 0, other_length, self, self_length); + other.destroy_empty(); + } else { + other.reverse(); + self.reverse_append(other); + } } spec append { pragma intrinsic = true; @@ -109,37 +140,53 @@ module std::vector { pragma intrinsic = true; } - /// Pushes all of the elements of the `other` vector into the `lhs` vector. - public fun reverse_append(lhs: &mut vector, other: vector) { - let len = length(&other); + /// Pushes all of the elements of the `other` vector into the `self` vector. + public fun reverse_append(self: &mut vector, other: vector) { + let len = other.length(); while (len > 0) { - push_back(lhs, pop_back(&mut other)); - len = len - 1; + self.push_back(other.pop_back()); + len -= 1; }; - destroy_empty(other); + other.destroy_empty(); } spec reverse_append { pragma intrinsic = true; } - /// Trim a vector to a smaller size, returning the evicted elements in order - public fun trim(v: &mut vector, new_len: u64): vector { - let res = trim_reverse(v, new_len); - reverse(&mut res); - res + /// Splits (trims) the collection into two at the given index. + /// Returns a newly allocated vector containing the elements in the range [new_len, len). + /// After the call, the original vector will be left containing the elements [0, new_len) + /// with its previous capacity unchanged. 
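The new `move_range` native, which `append` above (and `trim`, `insert`, `remove` further down) builds on when `USE_MOVE_RANGE` is set, is effectively a bulk splice between two vectors. A sketch of the documented semantics (hypothetical test module; assumes the native is available and enabled in the environment running the test):

```move
#[test_only]
module 0x42::move_range_sketch {
    use std::vector;

    #[test]
    fun splice_between_vectors() {
        let from = vector[1u64, 2, 3, 4, 5];
        let to = vector[10u64, 20];
        // Remove the range [1, 1+2) from `from` (the elements 2 and 3)
        // and insert it into `to` starting at index 1.
        vector::move_range(&mut from, 1, 2, &mut to, 1);
        assert!(from == vector[1, 4, 5], 0);
        assert!(to == vector[10, 2, 3, 20], 1);
    }
}
```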
+ /// In many languages this is also called `split_off`. + public fun trim(self: &mut vector, new_len: u64): vector { + let len = self.length(); + assert!(new_len <= len, EINDEX_OUT_OF_BOUNDS); + + let other = empty(); + if (USE_MOVE_RANGE) { + move_range(self, new_len, len - new_len, &mut other, 0); + } else { + while (len > new_len) { + other.push_back(self.pop_back()); + len -= 1; + }; + other.reverse(); + }; + + other } spec trim { pragma intrinsic = true; } /// Trim a vector to a smaller size, returning the evicted elements in reverse order - public fun trim_reverse(v: &mut vector, new_len: u64): vector { - let len = length(v); + public fun trim_reverse(self: &mut vector, new_len: u64): vector { + let len = self.length(); assert!(new_len <= len, EINDEX_OUT_OF_BOUNDS); let result = empty(); while (new_len < len) { - push_back(&mut result, pop_back(v)); - len = len - 1; + result.push_back(self.pop_back()); + len -= 1; }; result } @@ -148,18 +195,18 @@ module std::vector { } - /// Return `true` if the vector `v` has no elements and `false` otherwise. - public fun is_empty(v: &vector): bool { - length(v) == 0 + /// Return `true` if the vector `self` has no elements and `false` otherwise. + public fun is_empty(self: &vector): bool { + self.length() == 0 } - /// Return true if `e` is in the vector `v`. - public fun contains(v: &vector, e: &Element): bool { + /// Return true if `e` is in the vector `self`. + public fun contains(self: &vector, e: &Element): bool { let i = 0; - let len = length(v); + let len = self.length(); while (i < len) { - if (borrow(v, i) == e) return true; - i = i + 1; + if (self.borrow(i) == e) return true; + i += 1; }; false } @@ -167,14 +214,14 @@ module std::vector { pragma intrinsic = true; } - /// Return `(true, i)` if `e` is in the vector `v` at index `i`. + /// Return `(true, i)` if `e` is in the vector `self` at index `i`. /// Otherwise, returns `(false, 0)`. - public fun index_of(v: &vector, e: &Element): (bool, u64) { + public fun index_of(self: &vector, e: &Element): (bool, u64) { let i = 0; - let len = length(v); + let len = self.length(); while (i < len) { - if (borrow(v, i) == e) return (true, i); - i = i + 1; + if (self.borrow(i) == e) return (true, i); + i += 1; }; (false, 0) } @@ -185,66 +232,99 @@ module std::vector { /// Return `(true, i)` if there's an element that matches the predicate. If there are multiple elements that match /// the predicate, only the index of the first one is returned. /// Otherwise, returns `(false, 0)`. - public inline fun find(v: &vector, f: |&Element|bool): (bool, u64) { + public inline fun find(self: &vector, f: |&Element|bool): (bool, u64) { let find = false; let found_index = 0; let i = 0; - let len = length(v); + let len = self.length(); while (i < len) { // Cannot call return in an inline function so we need to resort to break here. - if (f(borrow(v, i))) { + if (f(self.borrow(i))) { find = true; found_index = i; break }; - i = i + 1; + i += 1; }; (find, found_index) } /// Insert a new element at position 0 <= i <= length, using O(length - i) time. /// Aborts if out of bounds. 
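`trim` above now behaves like the `split_off` its doc comment describes: the original vector keeps `[0, new_len)` and the returned vector carries the tail. A usage sketch (hypothetical test module; assumes the `move_range` fast path is usable in the test environment):

```move
#[test_only]
module 0x42::vector_trim_sketch {
    #[test]
    fun trim_splits_off_tail() {
        let v = vector[1u64, 2, 3, 4, 5];
        let tail = v.trim(3);
        assert!(v == vector[1, 2, 3], 0);
        assert!(tail == vector[4, 5], 1);
    }
}
```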
- public fun insert(v: &mut vector, i: u64, e: Element) { - let len = length(v); + public fun insert(self: &mut vector, i: u64, e: Element) { + let len = self.length(); assert!(i <= len, EINDEX_OUT_OF_BOUNDS); - push_back(v, e); - while (i < len) { - swap(v, i, len); - i = i + 1; + + if (USE_MOVE_RANGE) { + if (i + 2 >= len) { + // When we are close to the end, it is cheaper to not create + // a temporary vector, and swap directly + self.push_back(e); + while (i < len) { + self.swap(i, len); + i += 1; + }; + } else { + let other = singleton(e); + move_range(&mut other, 0, 1, self, i); + other.destroy_empty(); + } + } else { + self.push_back(e); + while (i < len) { + self.swap(i, len); + i += 1; + }; }; } spec insert { pragma intrinsic = true; } - /// Remove the `i`th element of the vector `v`, shifting all subsequent elements. + /// Remove the `i`th element of the vector `self`, shifting all subsequent elements. /// This is O(n) and preserves ordering of elements in the vector. /// Aborts if `i` is out of bounds. - public fun remove(v: &mut vector, i: u64): Element { - let len = length(v); + public fun remove(self: &mut vector, i: u64): Element { + let len = self.length(); // i out of bounds; abort if (i >= len) abort EINDEX_OUT_OF_BOUNDS; - len = len - 1; - while (i < len) swap(v, i, { i = i + 1; i }); - pop_back(v) + if (USE_MOVE_RANGE) { + // When we are close to the end, it is cheaper to not create + // a temporary vector, and swap directly + if (i + 3 >= len) { + len -= 1; + while (i < len) self.swap(i, { i += 1; i }); + self.pop_back() + } else { + let other = empty(); + move_range(self, i, 1, &mut other, 0); + let result = other.pop_back(); + other.destroy_empty(); + result + } + } else { + len -= 1; + while (i < len) self.swap(i, { i += 1; i }); + self.pop_back() + } } spec remove { pragma intrinsic = true; } - /// Remove the first occurrence of a given value in the vector `v` and return it in a vector, shifting all + /// Remove the first occurrence of a given value in the vector `self` and return it in a vector, shifting all /// subsequent elements. /// This is O(n) and preserves ordering of elements in the vector. /// This returns an empty vector if the value isn't present in the vector. /// Note that this cannot return an option as option uses vector and there'd be a circular dependency between option /// and vector. - public fun remove_value(v: &mut vector, val: &Element): vector { + public fun remove_value(self: &mut vector, val: &Element): vector { // This doesn't cost a O(2N) run time as index_of scans from left to right and stops when the element is found, // while remove would continue from the identified index to the end of the vector. - let (found, index) = index_of(v, val); + let (found, index) = self.index_of(val); if (found) { - vector[remove(v, index)] + vector[self.remove(index)] } else { vector[] } @@ -253,222 +333,237 @@ module std::vector { pragma intrinsic = true; } - /// Swap the `i`th element of the vector `v` with the last element and then pop the vector. + /// Swap the `i`th element of the vector `self` with the last element and then pop the vector. /// This is O(1), but does not preserve ordering of elements in the vector. /// Aborts if `i` is out of bounds. 
- public fun swap_remove(v: &mut vector, i: u64): Element { - assert!(!is_empty(v), EINDEX_OUT_OF_BOUNDS); - let last_idx = length(v) - 1; - swap(v, i, last_idx); - pop_back(v) + public fun swap_remove(self: &mut vector, i: u64): Element { + assert!(!self.is_empty(), EINDEX_OUT_OF_BOUNDS); + let last_idx = self.length() - 1; + self.swap(i, last_idx); + self.pop_back() } spec swap_remove { pragma intrinsic = true; } + /// Replace the `i`th element of the vector `self` with the given value, and return + /// to the caller the value that was there before. + /// Aborts if `i` is out of bounds. + public fun replace(self: &mut vector, i: u64, val: Element): Element { + let last_idx = self.length(); + assert!(i < last_idx, EINDEX_OUT_OF_BOUNDS); + if (USE_MOVE_RANGE) { + mem::replace(self.borrow_mut(i), val) + } else { + self.push_back(val); + self.swap(i, last_idx); + self.pop_back() + } + } + /// Apply the function to each element in the vector, consuming it. - public inline fun for_each(v: vector, f: |Element|) { - reverse(&mut v); // We need to reverse the vector to consume it efficiently - for_each_reverse(v, |e| f(e)); + public inline fun for_each(self: vector, f: |Element|) { + self.reverse(); // We need to reverse the vector to consume it efficiently + self.for_each_reverse(|e| f(e)); } /// Apply the function to each element in the vector, consuming it. - public inline fun for_each_reverse(v: vector, f: |Element|) { - let len = length(&v); + public inline fun for_each_reverse(self: vector, f: |Element|) { + let len = self.length(); while (len > 0) { - f(pop_back(&mut v)); - len = len - 1; + f(self.pop_back()); + len -= 1; }; - destroy_empty(v) + self.destroy_empty() } /// Apply the function to a reference of each element in the vector. - public inline fun for_each_ref(v: &vector, f: |&Element|) { + public inline fun for_each_ref(self: &vector, f: |&Element|) { let i = 0; - let len = length(v); + let len = self.length(); while (i < len) { - f(borrow(v, i)); - i = i + 1 + f(self.borrow(i)); + i += 1 } } /// Apply the function to each pair of elements in the two given vectors, consuming them. - public inline fun zip(v1: vector, v2: vector, f: |Element1, Element2|) { + public inline fun zip(self: vector, v2: vector, f: |Element1, Element2|) { // We need to reverse the vectors to consume it efficiently - reverse(&mut v1); - reverse(&mut v2); - zip_reverse(v1, v2, |e1, e2| f(e1, e2)); + self.reverse(); + v2.reverse(); + self.zip_reverse(v2, |e1, e2| f(e1, e2)); } /// Apply the function to each pair of elements in the two given vectors in the reverse order, consuming them. /// This errors out if the vectors are not of the same length. public inline fun zip_reverse( - v1: vector, + self: vector, v2: vector, f: |Element1, Element2|, ) { - let len = length(&v1); + let len = self.length(); // We can't use the constant EVECTORS_LENGTH_MISMATCH here as all calling code would then need to define it // due to how inline functions work. - assert!(len == length(&v2), 0x20002); + assert!(len == v2.length(), 0x20002); while (len > 0) { - f(pop_back(&mut v1), pop_back(&mut v2)); - len = len - 1; + f(self.pop_back(), v2.pop_back()); + len -= 1; }; - destroy_empty(v1); - destroy_empty(v2); + self.destroy_empty(); + v2.destroy_empty(); } /// Apply the function to the references of each pair of elements in the two given vectors. /// This errors out if the vectors are not of the same length. 
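The new `vector::replace` above swaps a single element in place (via `mem::replace` on the fast path) and hands back the displaced value. A usage sketch (hypothetical test module):

```move
#[test_only]
module 0x42::vector_replace_sketch {
    #[test]
    fun replace_returns_displaced_element() {
        let v = vector[1u64, 2, 3];
        let old = v.replace(1, 9);
        assert!(old == 2, 0);
        assert!(v == vector[1, 9, 3], 1);
    }
}
```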
public inline fun zip_ref( - v1: &vector, + self: &vector, v2: &vector, f: |&Element1, &Element2|, ) { - let len = length(v1); + let len = self.length(); // We can't use the constant EVECTORS_LENGTH_MISMATCH here as all calling code would then need to define it // due to how inline functions work. - assert!(len == length(v2), 0x20002); + assert!(len == v2.length(), 0x20002); let i = 0; while (i < len) { - f(borrow(v1, i), borrow(v2, i)); - i = i + 1 + f(self.borrow(i), v2.borrow(i)); + i += 1 } } /// Apply the function to a reference of each element in the vector with its index. - public inline fun enumerate_ref(v: &vector, f: |u64, &Element|) { + public inline fun enumerate_ref(self: &vector, f: |u64, &Element|) { let i = 0; - let len = length(v); + let len = self.length(); while (i < len) { - f(i, borrow(v, i)); - i = i + 1; + f(i, self.borrow(i)); + i += 1; }; } /// Apply the function to a mutable reference to each element in the vector. - public inline fun for_each_mut(v: &mut vector, f: |&mut Element|) { + public inline fun for_each_mut(self: &mut vector, f: |&mut Element|) { let i = 0; - let len = length(v); + let len = self.length(); while (i < len) { - f(borrow_mut(v, i)); - i = i + 1 + f(self.borrow_mut(i)); + i += 1 } } /// Apply the function to mutable references to each pair of elements in the two given vectors. /// This errors out if the vectors are not of the same length. public inline fun zip_mut( - v1: &mut vector, + self: &mut vector, v2: &mut vector, f: |&mut Element1, &mut Element2|, ) { let i = 0; - let len = length(v1); + let len = self.length(); // We can't use the constant EVECTORS_LENGTH_MISMATCH here as all calling code would then need to define it // due to how inline functions work. - assert!(len == length(v2), 0x20002); + assert!(len == v2.length(), 0x20002); while (i < len) { - f(borrow_mut(v1, i), borrow_mut(v2, i)); - i = i + 1 + f(self.borrow_mut(i), v2.borrow_mut(i)); + i += 1 } } /// Apply the function to a mutable reference of each element in the vector with its index. - public inline fun enumerate_mut(v: &mut vector, f: |u64, &mut Element|) { + public inline fun enumerate_mut(self: &mut vector, f: |u64, &mut Element|) { let i = 0; - let len = length(v); + let len = self.length(); while (i < len) { - f(i, borrow_mut(v, i)); - i = i + 1; + f(i, self.borrow_mut(i)); + i += 1; }; } /// Fold the function over the elements. For example, `fold(vector[1,2,3], 0, f)` will execute /// `f(f(f(0, 1), 2), 3)` public inline fun fold( - v: vector, + self: vector, init: Accumulator, f: |Accumulator,Element|Accumulator ): Accumulator { let accu = init; - for_each(v, |elem| accu = f(accu, elem)); + self.for_each(|elem| accu = f(accu, elem)); accu } /// Fold right like fold above but working right to left. For example, `fold(vector[1,2,3], 0, f)` will execute /// `f(1, f(2, f(3, 0)))` public inline fun foldr( - v: vector, + self: vector, init: Accumulator, f: |Element, Accumulator|Accumulator ): Accumulator { let accu = init; - for_each_reverse(v, |elem| accu = f(elem, accu)); + self.for_each_reverse(|elem| accu = f(elem, accu)); accu } /// Map the function over the references of the elements of the vector, producing a new vector without modifying the /// original vector. 
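With the `self` receivers, the inline combinators above compose naturally at call sites. A small sketch of `fold` and `map` (hypothetical test module):

```move
#[test_only]
module 0x42::vector_combinator_sketch {
    #[test]
    fun fold_and_map() {
        let v = vector[1u64, 2, 3];
        // fold consumes the vector: f(f(f(0, 1), 2), 3) == 6.
        let sum = v.fold(0, |acc, x| acc + x);
        assert!(sum == 6, 0);
        let w = vector[1u64, 2, 3];
        let doubled = w.map(|x| x * 2);
        assert!(doubled == vector[2, 4, 6], 1);
    }
}
```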
public inline fun map_ref( - v: &vector, + self: &vector, f: |&Element|NewElement ): vector { let result = vector[]; - for_each_ref(v, |elem| push_back(&mut result, f(elem))); + self.for_each_ref(|elem| result.push_back(f(elem))); result } /// Map the function over the references of the element pairs of two vectors, producing a new vector from the return /// values without modifying the original vectors. public inline fun zip_map_ref( - v1: &vector, + self: &vector, v2: &vector, f: |&Element1, &Element2|NewElement ): vector { // We can't use the constant EVECTORS_LENGTH_MISMATCH here as all calling code would then need to define it // due to how inline functions work. - assert!(length(v1) == length(v2), 0x20002); + assert!(self.length() == v2.length(), 0x20002); let result = vector[]; - zip_ref(v1, v2, |e1, e2| push_back(&mut result, f(e1, e2))); + self.zip_ref(v2, |e1, e2| result.push_back(f(e1, e2))); result } /// Map the function over the elements of the vector, producing a new vector. public inline fun map( - v: vector, + self: vector, f: |Element|NewElement ): vector { let result = vector[]; - for_each(v, |elem| push_back(&mut result, f(elem))); + self.for_each(|elem| result.push_back(f(elem))); result } /// Map the function over the element pairs of the two vectors, producing a new vector. public inline fun zip_map( - v1: vector, + self: vector, v2: vector, f: |Element1, Element2|NewElement ): vector { // We can't use the constant EVECTORS_LENGTH_MISMATCH here as all calling code would then need to define it // due to how inline functions work. - assert!(length(&v1) == length(&v2), 0x20002); + assert!(self.length() == v2.length(), 0x20002); let result = vector[]; - zip(v1, v2, |e1, e2| push_back(&mut result, f(e1, e2))); + self.zip(v2, |e1, e2| result.push_back(f(e1, e2))); result } /// Filter the vector using the boolean function, removing all elements for which `p(e)` is not true. public inline fun filter( - v: vector, + self: vector, p: |&Element|bool ): vector { let result = vector[]; - for_each(v, |elem| { - if (p(&elem)) push_back(&mut result, elem); + self.for_each(|elem| { + if (p(&elem)) result.push_back(elem); }); result } @@ -477,23 +572,23 @@ module std::vector { /// Preserves the relative order of the elements for which pred is true, /// BUT NOT for the elements for which pred is false. public inline fun partition( - v: &mut vector, + self: &mut vector, pred: |&Element|bool ): u64 { let i = 0; - let len = length(v); + let len = self.length(); while (i < len) { - if (!pred(borrow(v, i))) break; - i = i + 1; + if (!pred(self.borrow(i))) break; + i += 1; }; let p = i; - i = i + 1; + i += 1; while (i < len) { - if (pred(borrow(v, i))) { - swap(v, p, i); - p = p + 1; + if (pred(self.borrow(i))) { + self.swap(p, i); + p += 1; }; - i = i + 1; + i += 1; }; p } @@ -501,11 +596,11 @@ module std::vector { /// rotate(&mut [1, 2, 3, 4, 5], 2) -> [3, 4, 5, 1, 2] in place, returns the split point /// ie. 
3 in the example above public fun rotate( - v: &mut vector, + self: &mut vector, rot: u64 ): u64 { - let len = length(v); - rotate_slice(v, 0, rot, len) + let len = self.length(); + self.rotate_slice(0, rot, len) } spec rotate { pragma intrinsic = true; @@ -514,14 +609,14 @@ module std::vector { /// Same as above but on a sub-slice of an array [left, right) with left <= rot <= right /// returns the public fun rotate_slice( - v: &mut vector, + self: &mut vector, left: u64, rot: u64, right: u64 ): u64 { - reverse_slice(v, left, rot); - reverse_slice(v, rot, right); - reverse_slice(v, left, right); + self.reverse_slice(left, rot); + self.reverse_slice(rot, right); + self.reverse_slice(left, right); left + (right - rot) } spec rotate_slice { @@ -531,57 +626,57 @@ module std::vector { /// Partition the array based on a predicate p, this routine is stable and thus /// preserves the relative order of the elements in the two partitions. public inline fun stable_partition( - v: &mut vector, + self: &mut vector, p: |&Element|bool ): u64 { - let len = length(v); + let len = self.length(); let t = empty(); let f = empty(); while (len > 0) { - let e = pop_back(v); + let e = self.pop_back(); if (p(&e)) { - push_back(&mut t, e); + t.push_back(e); } else { - push_back(&mut f, e); + f.push_back(e); }; - len = len - 1; + len -= 1; }; - let pos = length(&t); - reverse_append(v, t); - reverse_append(v, f); + let pos = t.length(); + self.reverse_append(t); + self.reverse_append(f); pos } /// Return true if any element in the vector satisfies the predicate. public inline fun any( - v: &vector, + self: &vector, p: |&Element|bool ): bool { let result = false; let i = 0; - while (i < length(v)) { - result = p(borrow(v, i)); + while (i < self.length()) { + result = p(self.borrow(i)); if (result) { break }; - i = i + 1 + i += 1 }; result } /// Return true if all elements in the vector satisfy the predicate. public inline fun all( - v: &vector, + self: &vector, p: |&Element|bool ): bool { let result = true; let i = 0; - while (i < length(v)) { - result = p(borrow(v, i)); + while (i < self.length()) { + result = p(self.borrow(i)); if (!result) { break }; - i = i + 1 + i += 1 }; result } @@ -589,10 +684,10 @@ module std::vector { /// Destroy a vector, just a wrapper around for_each_reverse with a descriptive name /// when used in the context of destroying a vector. 
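The rotate example from the doc comment above, written out as a test sketch (hypothetical module name):

```move
#[test_only]
module 0x42::vector_rotate_sketch {
    #[test]
    fun rotate_by_two() {
        let v = vector[1u64, 2, 3, 4, 5];
        // Rotates in place and returns the split point.
        let split = v.rotate(2);
        assert!(v == vector[3, 4, 5, 1, 2], 0);
        assert!(split == 3, 1);
    }
}
```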
public inline fun destroy( - v: vector, + self: vector, d: |Element| ) { - for_each_reverse(v, |e| d(e)) + self.for_each_reverse(|e| d(e)) } public fun range(start: u64, end: u64): vector { @@ -604,23 +699,23 @@ module std::vector { let vec = vector[]; while (start < end) { - push_back(&mut vec, start); - start = start + step; + vec.push_back(start); + start += step; }; vec } public fun slice( - v: &vector, + self: &vector, start: u64, end: u64 ): vector { - assert!(start <= end && end <= length(v), EINVALID_SLICE_RANGE); + assert!(start <= end && end <= self.length(), EINVALID_SLICE_RANGE); let vec = vector[]; while (start < end) { - push_back(&mut vec, *borrow(v, start)); - start = start + 1; + vec.push_back(self[start]); + start += 1; }; vec } @@ -633,24 +728,24 @@ module std::vector { /// # Helper Functions spec module { - /// Check if `v1` is equal to the result of adding `e` at the end of `v2` - fun eq_push_back(v1: vector, v2: vector, e: Element): bool { - len(v1) == len(v2) + 1 && - v1[len(v1)-1] == e && - v1[0..len(v1)-1] == v2[0..len(v2)] + /// Check if `self` is equal to the result of adding `e` at the end of `v2` + fun eq_push_back(self: vector, v2: vector, e: Element): bool { + len(self) == len(v2) + 1 && + self[len(self)-1] == e && + self[0..len(self)-1] == v2[0..len(v2)] } - /// Check if `v` is equal to the result of concatenating `v1` and `v2` - fun eq_append(v: vector, v1: vector, v2: vector): bool { - len(v) == len(v1) + len(v2) && - v[0..len(v1)] == v1 && - v[len(v1)..len(v)] == v2 + /// Check if `self` is equal to the result of concatenating `v1` and `v2` + fun eq_append(self: vector, v1: vector, v2: vector): bool { + len(self) == len(v1) + len(v2) && + self[0..len(v1)] == v1 && + self[len(v1)..len(self)] == v2 } - /// Check `v1` is equal to the result of removing the first element of `v2` - fun eq_pop_front(v1: vector, v2: vector): bool { - len(v1) + 1 == len(v2) && - v1 == v2[1..len(v2)] + /// Check `self` is equal to the result of removing the first element of `v2` + fun eq_pop_front(self: vector, v2: vector): bool { + len(self) + 1 == len(v2) && + self == v2[1..len(v2)] } /// Check that `v1` is equal to the result of removing the element at index `i` from `v2`. @@ -660,9 +755,9 @@ module std::vector { v1[i..len(v1)] == v2[i + 1..len(v2)] } - /// Check if `v` contains `e`. - fun spec_contains(v: vector, e: Element): bool { - exists x in v: x == e + /// Check if `self` contains `e`. 
+ fun spec_contains(self: vector, e: Element): bool { + exists x in self: x == e } } diff --git a/aptos-move/framework/move-stdlib/src/natives/bcs.rs b/aptos-move/framework/move-stdlib/src/natives/bcs.rs index c5941a401e06c..8781b367ae46c 100644 --- a/aptos-move/framework/move-stdlib/src/natives/bcs.rs +++ b/aptos-move/framework/move-stdlib/src/natives/bcs.rs @@ -11,16 +11,26 @@ use aptos_native_interface::{ SafeNativeResult, }; use move_core_types::{ - gas_algebra::NumBytes, vm_status::sub_status::NFE_BCS_SERIALIZATION_FAILURE, + account_address::AccountAddress, + gas_algebra::{NumBytes, NumTypeNodes}, + u256, + value::{MoveStructLayout, MoveTypeLayout}, + vm_status::{sub_status::NFE_BCS_SERIALIZATION_FAILURE, StatusCode}, }; use move_vm_runtime::native_functions::NativeFunction; use move_vm_types::{ loaded_data::runtime_types::Type, - values::{values_impl::Reference, Value}, + natives::function::{PartialVMError, PartialVMResult}, + value_serde::ValueSerDeContext, + values::{values_impl::Reference, Struct, Value}, }; use smallvec::{smallvec, SmallVec}; use std::collections::VecDeque; +pub fn create_option_u64(value: Option) -> Value { + Value::struct_(Struct::pack(vec![Value::vector_u64(value)])) +} + /*************************************************************************************************** * native fun to_bytes * @@ -42,11 +52,9 @@ fn native_to_bytes( debug_assert!(ty_args.len() == 1); debug_assert!(args.len() == 1); - // pop type and value let ref_to_val = safely_pop_arg!(args, Reference); let arg_type = ty_args.pop().unwrap(); - // get type layout let layout = match context.type_to_type_layout(&arg_type) { Ok(layout) => layout, Err(_) => { @@ -57,9 +65,16 @@ fn native_to_bytes( }, }; - // serialize value + // TODO(#14175): Reading the reference performs a deep copy, and we can + // implement it in a more efficient way. let val = ref_to_val.read_ref()?; - let serialized_value = match val.simple_serialize(&layout) { + + let function_value_extension = context.function_value_extension(); + let serialized_value = match ValueSerDeContext::new() + .with_legacy_signer() + .with_func_args_deserialization(&function_value_extension) + .serialize(&val, &layout)? + { Some(serialized_value) => serialized_value, None => { context.charge(BCS_TO_BYTES_FAILURE)?; @@ -74,13 +89,175 @@ fn native_to_bytes( Ok(smallvec![Value::vector_u8(serialized_value)]) } +/*************************************************************************************************** + * native fun serialized_size + * + * gas cost: size_of(output) + * + * If the getting the type layout or serialization results in error, a special failure + * cost is charged. + * + **************************************************************************************************/ +fn native_serialized_size( + context: &mut SafeNativeContext, + mut ty_args: Vec, + mut args: VecDeque, +) -> SafeNativeResult> { + debug_assert!(ty_args.len() == 1); + debug_assert!(args.len() == 1); + + context.charge(BCS_SERIALIZED_SIZE_BASE)?; + + let reference = safely_pop_arg!(args, Reference); + let ty = ty_args.pop().unwrap(); + + let serialized_size = match serialized_size_impl(context, reference, &ty) { + Ok(serialized_size) => serialized_size as u64, + Err(_) => { + context.charge(BCS_SERIALIZED_SIZE_FAILURE)?; + + // Re-use the same abort code as bcs::to_bytes. 
+ return Err(SafeNativeError::Abort { + abort_code: NFE_BCS_SERIALIZATION_FAILURE, + }); + }, + }; + context.charge(BCS_SERIALIZED_SIZE_PER_BYTE_SERIALIZED * NumBytes::new(serialized_size))?; + + Ok(smallvec![Value::u64(serialized_size)]) +} + +fn serialized_size_impl( + context: &mut SafeNativeContext, + reference: Reference, + ty: &Type, +) -> PartialVMResult { + // TODO(#14175): Reading the reference performs a deep copy, and we can + // implement it in a more efficient way. + let value = reference.read_ref()?; + let ty_layout = context.type_to_type_layout(ty)?; + + let function_value_extension = context.function_value_extension(); + ValueSerDeContext::new() + .with_legacy_signer() + .with_func_args_deserialization(&function_value_extension) + .with_delayed_fields_serde() + .serialized_size(&value, &ty_layout) +} + +fn native_constant_serialized_size( + context: &mut SafeNativeContext, + mut ty_args: Vec, + _args: VecDeque, +) -> SafeNativeResult> { + debug_assert!(ty_args.len() == 1); + + context.charge(BCS_CONSTANT_SERIALIZED_SIZE_BASE)?; + + let ty = ty_args.pop().unwrap(); + let ty_layout = context.type_to_type_layout(&ty)?; + + let (visited_count, serialized_size_result) = constant_serialized_size(&ty_layout); + context + .charge(BCS_CONSTANT_SERIALIZED_SIZE_PER_TYPE_NODE * NumTypeNodes::new(visited_count))?; + + let result = match serialized_size_result { + Ok(value) => create_option_u64(value.map(|v| v as u64)), + Err(_) => { + context.charge(BCS_SERIALIZED_SIZE_FAILURE)?; + + // Re-use the same abort code as bcs::to_bytes. + return Err(SafeNativeError::Abort { + abort_code: NFE_BCS_SERIALIZATION_FAILURE, + }); + }, + }; + + Ok(smallvec![result]) +} + +/// If given type has a constant serialized size (irrespective of the instance), it returns the serialized +/// size in bytes any value would have. +/// Otherwise it returns None. +/// First element of the returned tuple represents number of visited nodes, used to charge gas. +fn constant_serialized_size(ty_layout: &MoveTypeLayout) -> (u64, PartialVMResult>) { + let mut visited_count = 1; + let bcs_size_result = match ty_layout { + MoveTypeLayout::Bool => bcs::serialized_size(&false).map(Some), + MoveTypeLayout::U8 => bcs::serialized_size(&0u8).map(Some), + MoveTypeLayout::U16 => bcs::serialized_size(&0u16).map(Some), + MoveTypeLayout::U32 => bcs::serialized_size(&0u32).map(Some), + MoveTypeLayout::U64 => bcs::serialized_size(&0u64).map(Some), + MoveTypeLayout::U128 => bcs::serialized_size(&0u128).map(Some), + MoveTypeLayout::U256 => bcs::serialized_size(&u256::U256::zero()).map(Some), + MoveTypeLayout::Address => bcs::serialized_size(&AccountAddress::ZERO).map(Some), + // signer's size is VM implementation detail, and can change at will. 
+ MoveTypeLayout::Signer => Ok(None), + // vectors have no constant size + MoveTypeLayout::Vector(_) => Ok(None), + // enums and functions have no constant size + MoveTypeLayout::Struct( + MoveStructLayout::RuntimeVariants(_) | MoveStructLayout::WithVariants(_), + ) + | MoveTypeLayout::Function => Ok(None), + MoveTypeLayout::Struct(MoveStructLayout::Runtime(fields)) => { + let mut total = Some(0); + for field in fields { + let (cur_visited_count, cur) = constant_serialized_size(field); + visited_count += cur_visited_count; + match cur { + Err(e) => return (visited_count, Err(e)), + Ok(Some(cur_value)) => total = total.map(|v| v + cur_value), + Ok(None) => { + total = None; + break; + }, + } + } + Ok(total) + }, + MoveTypeLayout::Struct(MoveStructLayout::WithFields(_)) + | MoveTypeLayout::Struct(MoveStructLayout::WithTypes { .. }) => { + return ( + visited_count, + Err( + PartialVMError::new(StatusCode::VALUE_SERIALIZATION_ERROR).with_message( + "Only runtime types expected, but found WithFields/WithTypes".to_string(), + ), + ), + ) + }, + MoveTypeLayout::Native(_, inner) => { + let (cur_visited_count, cur) = constant_serialized_size(inner); + visited_count += cur_visited_count; + match cur { + Err(e) => return (visited_count, Err(e)), + Ok(v) => Ok(v), + } + }, + }; + ( + visited_count, + bcs_size_result.map_err(|e| { + PartialVMError::new(StatusCode::VALUE_SERIALIZATION_ERROR).with_message(format!( + "failed to compute serialized size of a value: {:?}", + e + )) + }), + ) +} + /*************************************************************************************************** * module **************************************************************************************************/ pub fn make_all( builder: &SafeNativeBuilder, ) -> impl Iterator + '_ { - let funcs = [("to_bytes", native_to_bytes as RawSafeNative)]; + let funcs = [ + ("to_bytes", native_to_bytes as RawSafeNative), + ("serialized_size", native_serialized_size), + ("constant_serialized_size", native_constant_serialized_size), + ]; builder.make_named_natives(funcs) } diff --git a/aptos-move/framework/move-stdlib/src/natives/cmp.rs b/aptos-move/framework/move-stdlib/src/natives/cmp.rs new file mode 100644 index 0000000000000..d867e8a5dc0e4 --- /dev/null +++ b/aptos-move/framework/move-stdlib/src/natives/cmp.rs @@ -0,0 +1,75 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +// Copyright (c) The Diem Core Contributors +// Copyright (c) The Move Contributors +// SPDX-License-Identifier: Apache-2.0 + +//! Implementation of native functions for value comparison. 
+
+use aptos_gas_schedule::gas_params::natives::move_stdlib::{
+    CMP_COMPARE_BASE, CMP_COMPARE_PER_ABS_VAL_UNIT,
+};
+use aptos_native_interface::{
+    RawSafeNative, SafeNativeBuilder, SafeNativeContext, SafeNativeError, SafeNativeResult,
+};
+use move_core_types::vm_status::StatusCode;
+use move_vm_runtime::native_functions::NativeFunction;
+use move_vm_types::{
+    loaded_data::runtime_types::Type,
+    natives::function::PartialVMError,
+    values::{Struct, Value},
+};
+use smallvec::{smallvec, SmallVec};
+use std::collections::VecDeque;
+
+const ORDERING_LESS_THAN_VARIANT: u16 = 0;
+const ORDERING_EQUAL_VARIANT: u16 = 1;
+const ORDERING_GREATER_THAN_VARIANT: u16 = 2;
+
+/***************************************************************************************************
+ * native fun native_compare
+ *
+ *   gas cost: CMP_COMPARE_BASE + CMP_COMPARE_PER_ABS_VAL_UNIT * dereferenced_size_of_both_values
+ *
+ **************************************************************************************************/
+fn native_compare(
+    context: &mut SafeNativeContext,
+    _ty_args: Vec<Type>,
+    args: VecDeque<Value>,
+) -> SafeNativeResult<SmallVec<[Value; 1]>> {
+    debug_assert!(args.len() == 2);
+    if args.len() != 2 {
+        return Err(SafeNativeError::InvariantViolation(PartialVMError::new(
+            StatusCode::UNKNOWN_INVARIANT_VIOLATION_ERROR,
+        )));
+    }
+
+    let cost = CMP_COMPARE_BASE
+        + CMP_COMPARE_PER_ABS_VAL_UNIT
+            * (context.abs_val_size_dereferenced(&args[0])
+                + context.abs_val_size_dereferenced(&args[1]));
+    context.charge(cost)?;
+
+    let ordering = args[0].compare(&args[1])?;
+    let ordering_move_variant = match ordering {
+        std::cmp::Ordering::Less => ORDERING_LESS_THAN_VARIANT,
+        std::cmp::Ordering::Equal => ORDERING_EQUAL_VARIANT,
+        std::cmp::Ordering::Greater => ORDERING_GREATER_THAN_VARIANT,
+    };
+
+    Ok(smallvec![Value::struct_(Struct::pack(vec![Value::u16(
+        ordering_move_variant
+    )]))])
+}
+
+/***************************************************************************************************
+ * module
+ **************************************************************************************************/
+pub fn make_all(
+    builder: &SafeNativeBuilder,
+) -> impl Iterator<Item = (String, NativeFunction)> + '_ {
+    let natives = [("compare", native_compare as RawSafeNative)];
+
+    builder.make_named_natives(natives)
+}
diff --git a/aptos-move/framework/move-stdlib/src/natives/mem.rs b/aptos-move/framework/move-stdlib/src/natives/mem.rs
new file mode 100644
index 0000000000000..578d4dc02d685
--- /dev/null
+++ b/aptos-move/framework/move-stdlib/src/natives/mem.rs
@@ -0,0 +1,64 @@
+// Copyright © Aptos Foundation
+// SPDX-License-Identifier: Apache-2.0
+
+//! Implementation of native functions for memory manipulation.
+
+use aptos_gas_schedule::gas_params::natives::move_stdlib::MEM_SWAP_BASE;
+use aptos_native_interface::{
+    safely_pop_arg, RawSafeNative, SafeNativeBuilder, SafeNativeContext, SafeNativeError,
+    SafeNativeResult,
+};
+use aptos_types::error;
+use move_vm_runtime::native_functions::NativeFunction;
+use move_vm_types::{
+    loaded_data::runtime_types::Type,
+    values::{Reference, Value},
+};
+use smallvec::{smallvec, SmallVec};
+use std::collections::VecDeque;
+
+/// The feature is not enabled.
+pub const EFEATURE_NOT_ENABLED: u64 = 1;
+
+/***************************************************************************************************
+ * native fun native_swap
+ *
+ *   gas cost: MEM_SWAP_BASE
+ *
+ **************************************************************************************************/
+fn native_swap(
+    context: &mut SafeNativeContext,
+    _ty_args: Vec<Type>,
+    mut args: VecDeque<Value>,
+) -> SafeNativeResult<SmallVec<[Value; 1]>> {
+    if !context
+        .get_feature_flags()
+        .is_native_memory_operations_enabled()
+    {
+        return Err(SafeNativeError::Abort {
+            abort_code: error::unavailable(EFEATURE_NOT_ENABLED),
+        });
+    }
+
+    debug_assert!(args.len() == 2);
+
+    context.charge(MEM_SWAP_BASE)?;
+
+    let left = safely_pop_arg!(args, Reference);
+    let right = safely_pop_arg!(args, Reference);
+
+    left.swap_values(right)?;
+
+    Ok(smallvec![])
+}
+
+/***************************************************************************************************
+ * module
+ **************************************************************************************************/
+pub fn make_all(
+    builder: &SafeNativeBuilder,
+) -> impl Iterator<Item = (String, NativeFunction)> + '_ {
+    let natives = [("swap", native_swap as RawSafeNative)];
+
+    builder.make_named_natives(natives)
+}
diff --git a/aptos-move/framework/move-stdlib/src/natives/mod.rs b/aptos-move/framework/move-stdlib/src/natives/mod.rs
index 56b37bd332960..1ea4a32ebc93d 100644
--- a/aptos-move/framework/move-stdlib/src/natives/mod.rs
+++ b/aptos-move/framework/move-stdlib/src/natives/mod.rs
@@ -6,11 +6,14 @@
 // SPDX-License-Identifier: Apache-2.0
 
 pub mod bcs;
+pub mod cmp;
 pub mod hash;
+pub mod mem;
 pub mod signer;
 pub mod string;
 #[cfg(feature = "testing")]
 pub mod unit_test;
+pub mod vector;
 
 use aptos_native_interface::SafeNativeBuilder;
 use move_core_types::account_address::AccountAddress;
@@ -32,9 +35,12 @@ pub fn all_natives(
 
     builder.with_incremental_gas_charging(false, |builder| {
         add_natives!("bcs", bcs::make_all(builder));
+        add_natives!("cmp", cmp::make_all(builder));
         add_natives!("hash", hash::make_all(builder));
+        add_natives!("mem", mem::make_all(builder));
         add_natives!("signer", signer::make_all(builder));
         add_natives!("string", string::make_all(builder));
+        add_natives!("vector", vector::make_all(builder));
         #[cfg(feature = "testing")]
         {
             add_natives!("unit_test", unit_test::make_all(builder));
diff --git a/aptos-move/framework/move-stdlib/src/natives/unit_test.rs b/aptos-move/framework/move-stdlib/src/natives/unit_test.rs
index f23a1fb56b6f6..9e928af622264 100644
--- a/aptos-move/framework/move-stdlib/src/natives/unit_test.rs
+++ b/aptos-move/framework/move-stdlib/src/natives/unit_test.rs
@@ -38,7 +38,7 @@ fn native_create_signers_for_testing(
 
     let num_signers = safely_pop_arg!(args, u64);
     let signers = Value::vector_for_testing_only(
-        (0..num_signers).map(|i| Value::signer(AccountAddress::new(to_le_bytes(i)))),
+        (0..num_signers).map(|i| Value::master_signer(AccountAddress::new(to_le_bytes(i)))),
     );
 
     Ok(smallvec![signers])
diff --git a/aptos-move/framework/move-stdlib/src/natives/vector.rs b/aptos-move/framework/move-stdlib/src/natives/vector.rs
new file mode 100644
index 0000000000000..3cf6685630ac4
--- /dev/null
+++ b/aptos-move/framework/move-stdlib/src/natives/vector.rs
@@ -0,0 +1,116 @@
+// Copyright © Aptos Foundation
+// SPDX-License-Identifier: Apache-2.0
+
+// Copyright (c) The Diem Core Contributors
+// Copyright (c) The Move Contributors
+// SPDX-License-Identifier: Apache-2.0
+
+//! Implementation of native functions (non-bytecode instructions) for vector.
+
+use aptos_gas_schedule::gas_params::natives::move_stdlib::{
+    VECTOR_MOVE_RANGE_BASE, VECTOR_MOVE_RANGE_PER_INDEX_MOVED,
+};
+use aptos_native_interface::{
+    safely_pop_arg, RawSafeNative, SafeNativeBuilder, SafeNativeContext, SafeNativeError,
+    SafeNativeResult,
+};
+use aptos_types::error;
+use move_core_types::gas_algebra::NumArgs;
+use move_vm_runtime::native_functions::NativeFunction;
+use move_vm_types::{
+    loaded_data::runtime_types::Type,
+    values::{Value, VectorRef},
+};
+use smallvec::{smallvec, SmallVec};
+use std::collections::VecDeque;
+
+/// Given input positions/lengths are outside of vector boundaries.
+pub const EINDEX_OUT_OF_BOUNDS: u64 = 1;
+
+/// The feature is not enabled.
+pub const EFEATURE_NOT_ENABLED: u64 = 2;
+
+/***************************************************************************************************
+ * native fun move_range<T>(from: &mut vector<T>, removal_position: u64, length: u64, to: &mut vector<T>, insert_position: u64)
+ *
+ *   gas cost: VECTOR_MOVE_RANGE_BASE + VECTOR_MOVE_RANGE_PER_INDEX_MOVED * num_elements_to_move
+ *
+ **************************************************************************************************/
+fn native_move_range(
+    context: &mut SafeNativeContext,
+    ty_args: Vec<Type>,
+    mut args: VecDeque<Value>,
+) -> SafeNativeResult<SmallVec<[Value; 1]>> {
+    if !context
+        .get_feature_flags()
+        .is_native_memory_operations_enabled()
+    {
+        return Err(SafeNativeError::Abort {
+            abort_code: error::unavailable(EFEATURE_NOT_ENABLED),
+        });
+    }
+
+    context.charge(VECTOR_MOVE_RANGE_BASE)?;
+
+    let map_err = |_| SafeNativeError::Abort {
+        abort_code: error::invalid_argument(EINDEX_OUT_OF_BOUNDS),
+    };
+    let insert_position = usize::try_from(safely_pop_arg!(args, u64)).map_err(map_err)?;
+    let to = safely_pop_arg!(args, VectorRef);
+    let length = usize::try_from(safely_pop_arg!(args, u64)).map_err(map_err)?;
+    let removal_position = usize::try_from(safely_pop_arg!(args, u64)).map_err(map_err)?;
+    let from = safely_pop_arg!(args, VectorRef);
+
+    // We need to charge gas before executing, so we fetch and check the sizes here.
+    // The sizes are fetched and checked again inside the VectorRef::move_range call;
+    // it is unclear whether the two can be combined, since no charging happens there.
+    let to_len = to.length_as_usize(&ty_args[0])?;
+    let from_len = from.length_as_usize(&ty_args[0])?;
+
+    if removal_position
+        .checked_add(length)
+        .is_none_or(|end| end > from_len)
+        || insert_position > to_len
+    {
+        return Err(SafeNativeError::Abort {
+            abort_code: EINDEX_OUT_OF_BOUNDS,
+        });
+    }
+
+    // We move all elements in the range, all elements after the range, and all elements after
+    // the insertion point. The "length" of the moved block is counted twice, as it is both moved
+    // out and moved in. From calibration testing, this is a reasonable approximation of the cost
+    // of the operation.
+    context.charge(
+        VECTOR_MOVE_RANGE_PER_INDEX_MOVED
+            * NumArgs::new(
+                (from_len - removal_position)
+                    .checked_add(to_len - insert_position)
+                    .and_then(|v| v.checked_add(length))
+                    .ok_or_else(|| SafeNativeError::Abort {
+                        abort_code: EINDEX_OUT_OF_BOUNDS,
+                    })?
as u64, + ), + )?; + + VectorRef::move_range( + &from, + removal_position, + length, + &to, + insert_position, + &ty_args[0], + )?; + + Ok(smallvec![]) +} + +/*************************************************************************************************** + * module + **************************************************************************************************/ +pub fn make_all( + builder: &SafeNativeBuilder, +) -> impl Iterator + '_ { + let natives = [("move_range", native_move_range as RawSafeNative)]; + + builder.make_named_natives(natives) +} diff --git a/aptos-move/framework/move-stdlib/tests/bcs_tests.move b/aptos-move/framework/move-stdlib/tests/bcs_tests.move index 72437ebb00fd6..0502461104217 100644 --- a/aptos-move/framework/move-stdlib/tests/bcs_tests.move +++ b/aptos-move/framework/move-stdlib/tests/bcs_tests.move @@ -1,53 +1,103 @@ #[test_only] module std::bcs_tests { use std::bcs; + use std::option; + use std::signer; struct Box has copy, drop, store { x: T } + struct Box3 has copy, drop, store { x: Box> } + struct Box7 has copy, drop, store { x: Box3> } + struct Box15 has copy, drop, store { x: Box7> } + struct Box31 has copy, drop, store { x: Box15> } + struct Box63 has copy, drop, store { x: Box31> } - struct Box127 has copy, drop, store { x: Box63> } - /* Deactivated because of address size dependency - #[test] - fun bcs_address() { - let addr = @0x89b9f9d1fadc027cf9532d6f99041522; - let expected_output = x"89b9f9d1fadc027cf9532d6f99041522"; - assert!(bcs::to_bytes(&addr) == expected_output, 0); - } - */ + struct Box127 has copy, drop, store { x: Box63> } #[test] fun bcs_bool() { - let expected_output = x"01"; - assert!(bcs::to_bytes(&true) == expected_output, 0); + let expected_bytes = x"01"; + let actual_bytes = bcs::to_bytes(&true); + assert!(actual_bytes == expected_bytes, 0); + + let expected_size = actual_bytes.length(); + let actual_size = bcs::serialized_size(&true); + assert!(actual_size == expected_size, 1); + + assert!(option::some(actual_size) == bcs::constant_serialized_size(), 2); } #[test] fun bcs_u8() { - let expected_output = x"01"; - assert!(bcs::to_bytes(&1u8) == expected_output, 0); + let expected_bytes = x"01"; + let actual_bytes = bcs::to_bytes(&1u8); + assert!(actual_bytes == expected_bytes, 0); + + let expected_size = actual_bytes.length(); + let actual_size = bcs::serialized_size(&1u8); + assert!(actual_size == expected_size, 1); + + assert!(option::some(actual_size) == bcs::constant_serialized_size(), 2); } #[test] fun bcs_u64() { - let expected_output = x"0100000000000000"; - assert!(bcs::to_bytes(&1) == expected_output, 0); + let expected_bytes = x"0100000000000000"; + let actual_bytes = bcs::to_bytes(&1); + assert!(actual_bytes == expected_bytes, 0); + + let expected_size = actual_bytes.length(); + let actual_size = bcs::serialized_size(&1); + assert!(actual_size == expected_size, 1); + + assert!(option::some(actual_size) == bcs::constant_serialized_size(), 2); } #[test] fun bcs_u128() { - let expected_output = x"01000000000000000000000000000000"; - assert!(bcs::to_bytes(&1u128) == expected_output, 0); + let expected_bytes = x"01000000000000000000000000000000"; + let actual_bytes = bcs::to_bytes(&1u128); + assert!(actual_bytes == expected_bytes, 0); + + let expected_size = actual_bytes.length(); + let actual_size = bcs::serialized_size(&1u128); + assert!(actual_size == expected_size, 1); + + assert!(option::some(actual_size) == bcs::constant_serialized_size(), 2); } #[test] fun bcs_vec_u8() { let v = x"0f"; - let expected_output = 
x"010f"; - assert!(bcs::to_bytes(&v) == expected_output, 0); + + let expected_bytes = x"010f"; + let actual_bytes = bcs::to_bytes(&v); + assert!(actual_bytes == expected_bytes, 0); + + let expected_size = actual_bytes.length(); + let actual_size = bcs::serialized_size(&v); + assert!(actual_size == expected_size, 1); + + assert!(option::none() == bcs::constant_serialized_size>(), 2); + } + + #[test(creator = @0xcafe)] + fun bcs_address(creator: &signer) { + let v = signer::address_of(creator); + + let expected_bytes = x"000000000000000000000000000000000000000000000000000000000000CAFE"; + let actual_bytes = bcs::to_bytes(&v); + assert!(actual_bytes == expected_bytes, 0); + + let expected_size = actual_bytes.length(); + let actual_size = bcs::serialized_size(&v); + assert!(actual_size == expected_size, 1); + + assert!(option::some(actual_size) == bcs::constant_serialized_size
(), 2); } fun box3(x: T): Box3 { @@ -76,14 +126,48 @@ module std::bcs_tests { #[test] fun encode_128() { - bcs::to_bytes(&box127(true)); + let box = box127(true); + + let bytes = bcs::to_bytes(&box); + let expected_size = bytes.length(); + + let actual_size = bcs::serialized_size(&box); + assert!(actual_size == expected_size, 0); + + assert!(option::some(actual_size) == bcs::constant_serialized_size>(), 1); + assert!(option::none() == bcs::constant_serialized_size>>(), 2); + assert!(option::none() == bcs::constant_serialized_size>>(), 3); } - /* Deactivated because we now limit the depth of values you could create inside the VM + enum Singleton { + V1(u64), + } + + fun encode_enum() { + assert!(option::none() == bcs::constant_serialized_size()); + assert!(option::none() == bcs::constant_serialized_size>()); + } + + // test that serialization is little-endian, and so produces different + // ordering than "expected" natural ordering. #[test] - #[expected_failure(abort_code = 453, location = std::bcs)] - fun encode_129() { - bcs::to_bytes(&Box { x: box127(true) }); + fun bcs_comparison() { + let val = 256 * 4 + 2; + let other = 256 * 2 + 4; + + assert!(std::cmp::compare(&val, &other).is_gt()); + + let bytes_val = bcs::to_bytes(&val); + let bytes_other = bcs::to_bytes(&other); + + assert!(std::cmp::compare(&bytes_val, &bytes_other).is_lt()); + } + + #[test(s1 = @0x123)] + fun test_signer_serialization(s1: signer) { + assert!( + bcs::to_bytes(&s1) == bcs::to_bytes(&@0x123), + 1 + ); } - */ } diff --git a/aptos-move/framework/move-stdlib/tests/bit_vector_tests.move b/aptos-move/framework/move-stdlib/tests/bit_vector_tests.move index c03322fa92363..32a9a31dca2a4 100644 --- a/aptos-move/framework/move-stdlib/tests/bit_vector_tests.move +++ b/aptos-move/framework/move-stdlib/tests/bit_vector_tests.move @@ -7,26 +7,23 @@ module std::bit_vector_tests { let bitvector = bit_vector::new(k); let index = 0; while (index < k) { - bit_vector::set(&mut bitvector, index); - assert!(bit_vector::is_index_set(&bitvector, index), 0); - index = index + 1; - let index_to_right = index; - while (index_to_right < k) { - assert!(!bit_vector::is_index_set(&bitvector, index_to_right), 1); - index_to_right = index_to_right + 1; + bitvector.set(index); + assert!(bitvector.is_index_set(index), 0); + index += 1; + + for (index_to_right in index..k) { + assert!(!bitvector.is_index_set(index_to_right), 1); }; }; // now go back down unsetting index = 0; while (index < k) { - bit_vector::unset(&mut bitvector, index); - assert!(!bit_vector::is_index_set(&bitvector, index), 0); - index = index + 1; - let index_to_right = index; - while (index_to_right < k) { - assert!(bit_vector::is_index_set(&bitvector, index_to_right), 1); - index_to_right = index_to_right + 1; + bitvector.unset(index); + assert!(!bitvector.is_index_set(index), 0); + index += 1; + for (index_to_right in index..k) { + assert!(bitvector.is_index_set(index_to_right), 1); }; }; } @@ -35,21 +32,21 @@ module std::bit_vector_tests { #[expected_failure(abort_code = bit_vector::EINDEX)] fun set_bit_out_of_bounds() { let bitvector = bit_vector::new(bit_vector::word_size()); - bit_vector::set(&mut bitvector, bit_vector::word_size()); + bitvector.set(bit_vector::word_size()); } #[test] #[expected_failure(abort_code = bit_vector::EINDEX)] fun unset_bit_out_of_bounds() { let bitvector = bit_vector::new(bit_vector::word_size()); - bit_vector::unset(&mut bitvector, bit_vector::word_size()); + bitvector.unset(bit_vector::word_size()); } #[test] #[expected_failure(abort_code = 
bit_vector::EINDEX)] fun index_bit_out_of_bounds() { let bitvector = bit_vector::new(bit_vector::word_size()); - bit_vector::is_index_set(&mut bitvector, bit_vector::word_size()); + bitvector.is_index_set(bit_vector::word_size()); } #[test] @@ -65,47 +62,43 @@ module std::bit_vector_tests { #[test] fun longest_sequence_no_set_zero_index() { let bitvector = bit_vector::new(100); - assert!(bit_vector::longest_set_sequence_starting_at(&bitvector, 0) == 0, 0); + assert!(bitvector.longest_set_sequence_starting_at(0) == 0, 0); } #[test] fun longest_sequence_one_set_zero_index() { let bitvector = bit_vector::new(100); - bit_vector::set(&mut bitvector, 1); - assert!(bit_vector::longest_set_sequence_starting_at(&bitvector, 0) == 0, 0); + bitvector.set(1); + assert!(bitvector.longest_set_sequence_starting_at(0) == 0, 0); } #[test] fun longest_sequence_no_set_nonzero_index() { let bitvector = bit_vector::new(100); - assert!(bit_vector::longest_set_sequence_starting_at(&bitvector, 51) == 0, 0); + assert!(bitvector.longest_set_sequence_starting_at(51) == 0, 0); } #[test] fun longest_sequence_two_set_nonzero_index() { let bitvector = bit_vector::new(100); - bit_vector::set(&mut bitvector, 50); - bit_vector::set(&mut bitvector, 52); - assert!(bit_vector::longest_set_sequence_starting_at(&bitvector, 51) == 0, 0); + bitvector.set(50); + bitvector.set(52); + assert!(bitvector.longest_set_sequence_starting_at(51) == 0, 0); } #[test] fun longest_sequence_with_break() { let bitvector = bit_vector::new(100); - let i = 0; - while (i < 20) { - bit_vector::set(&mut bitvector, i); - i = i + 1; + for (i in 0..20) { + bitvector.set(i); }; // create a break in the run - i = i + 1; - while (i < 100) { - bit_vector::set(&mut bitvector, i); - i = i + 1; + for (i in 21..100) { + bitvector.set(i); }; - assert!(bit_vector::longest_set_sequence_starting_at(&bitvector, 0) == 20, 0); - assert!(bit_vector::longest_set_sequence_starting_at(&bitvector, 20) == 0, 0); - assert!(bit_vector::longest_set_sequence_starting_at(&bitvector, 21) == 100 - 21, 0); + assert!(bitvector.longest_set_sequence_starting_at(0) == 20, 0); + assert!(bitvector.longest_set_sequence_starting_at(20) == 0, 0); + assert!(bitvector.longest_set_sequence_starting_at(21) == 100 - 21, 0); } #[test] @@ -114,17 +107,16 @@ module std::bit_vector_tests { let bitvector = bit_vector::new(bitlen); let i = 0; - while (i < bitlen) { - bit_vector::set(&mut bitvector, i); - i = i + 1; + for (i in 0..bitlen) { + bitvector.set(i); }; i = bitlen - 1; while (i > 0) { - assert!(bit_vector::is_index_set(&bitvector, i), 0); - bit_vector::shift_left(&mut bitvector, 1); - assert!(!bit_vector::is_index_set(&bitvector, i), 1); - i = i - 1; + assert!(bitvector.is_index_set(i), 0); + bitvector.shift_left(1); + assert!(!bitvector.is_index_set(i), 1); + i -= 1; }; } @@ -134,20 +126,18 @@ module std::bit_vector_tests { let shift_amount = 133; let bitvector = bit_vector::new(bitlen); - bit_vector::set(&mut bitvector, 201); - assert!(bit_vector::is_index_set(&bitvector, 201), 0); + bitvector.set(201); + assert!(bitvector.is_index_set(201), 0); - bit_vector::shift_left(&mut bitvector, shift_amount); - assert!(bit_vector::is_index_set(&bitvector, 201 - shift_amount), 1); - assert!(!bit_vector::is_index_set(&bitvector, 201), 2); + bitvector.shift_left(shift_amount); + assert!(bitvector.is_index_set(201 - shift_amount), 1); + assert!(!bitvector.is_index_set(201), 2); // Make sure this shift clears all the bits - bit_vector::shift_left(&mut bitvector, bitlen - 1); + bitvector.shift_left(bitlen - 
1); - let i = 0; - while (i < bitlen) { - assert!(!bit_vector::is_index_set(&bitvector, i), 3); - i = i + 1; + for (i in 0..bitlen) { + assert!(!bitvector.is_index_set(i), 3); } } @@ -158,28 +148,22 @@ module std::bit_vector_tests { let shift_amount = 3; let bitvector = bit_vector::new(bitlen); - let i = 0; - - while (i < bitlen) { - bit_vector::set(&mut bitvector, i); - i = i + 1; + for (i in 0..bitlen) { + bitvector.set(i); }; - bit_vector::unset(&mut bitvector, chosen_index); - assert!(!bit_vector::is_index_set(&bitvector, chosen_index), 0); - - bit_vector::shift_left(&mut bitvector, shift_amount); + bitvector.unset(chosen_index); + assert!(!bitvector.is_index_set(chosen_index), 0); - i = 0; + bitvector.shift_left(shift_amount); - while (i < bitlen) { + for (i in 0..bitlen) { // only chosen_index - shift_amount and the remaining bits should be BitVector::unset if ((i == chosen_index - shift_amount) || (i >= bitlen - shift_amount)) { - assert!(!bit_vector::is_index_set(&bitvector, i), 1); + assert!(!bitvector.is_index_set(i), 1); } else { - assert!(bit_vector::is_index_set(&bitvector, i), 2); + assert!(bitvector.is_index_set(i), 2); }; - i = i + 1; } } @@ -188,17 +172,15 @@ module std::bit_vector_tests { let bitlen = 133; let bitvector = bit_vector::new(bitlen); - let i = 0; - while (i < bitlen) { - bit_vector::set(&mut bitvector, i); - i = i + 1; + for (i in 0..bitlen) { + bitvector.set(i); }; - bit_vector::shift_left(&mut bitvector, bitlen - 1); - i = bitlen - 1; + bitvector.shift_left(bitlen - 1); + let i = bitlen - 1; while (i > 0) { - assert!(!bit_vector::is_index_set(&bitvector, i), 1); - i = i - 1; + assert!(!bitvector.is_index_set(i), 1); + i -= 1; }; } @@ -206,7 +188,7 @@ module std::bit_vector_tests { fun shift_left_more_than_size() { let bitlen = 133; let bitvector = bit_vector::new(bitlen); - bit_vector::shift_left(&mut bitvector, bitlen); + bitvector.shift_left(bitlen); } #[test] @@ -218,6 +200,6 @@ module std::bit_vector_tests { #[test] fun single_bit_bitvector() { let bitvector = bit_vector::new(1); - assert!(bit_vector::length(&bitvector) == 1, 0); + assert!(bitvector.length() == 1, 0); } } diff --git a/aptos-move/framework/move-stdlib/tests/fixedpoint32_tests.move b/aptos-move/framework/move-stdlib/tests/fixedpoint32_tests.move index 83513dbe0cfda..2530f4253abf7 100644 --- a/aptos-move/framework/move-stdlib/tests/fixedpoint32_tests.move +++ b/aptos-move/framework/move-stdlib/tests/fixedpoint32_tests.move @@ -28,7 +28,7 @@ module std::fixed_point32_tests { #[test] fun create_zero() { let x = fixed_point32::create_from_rational(0, 1); - assert!(fixed_point32::is_zero(x), 0); + assert!(x.is_zero(), 0); } #[test] @@ -94,7 +94,7 @@ module std::fixed_point32_tests { assert!(not_three == 2, 0); // Try again with a fraction slightly larger than 1/3. - let f = fixed_point32::create_from_raw_value(fixed_point32::get_raw_value(f) + 1); + let f = fixed_point32::create_from_raw_value(f.get_raw_value() + 1); let three = fixed_point32::multiply_u64(9, f); assert!(three == 3, 1); } @@ -103,8 +103,8 @@ module std::fixed_point32_tests { fun create_from_rational_max_numerator_denominator() { // Test creating a 1.0 fraction from the maximum u64 value. 
let f = fixed_point32::create_from_rational(18446744073709551615, 18446744073709551615); - let one = fixed_point32::get_raw_value(f); - assert!(one == 4294967296, 0); // 0x1.00000000 + let one = f.get_raw_value(); + assert!(one == 4294967296); // 0x1.00000000 } #[test] @@ -112,11 +112,11 @@ module std::fixed_point32_tests { let one = fixed_point32::create_from_rational(1, 1); let two = fixed_point32::create_from_rational(2, 1); let smaller_number1 = fixed_point32::min(one, two); - let val1 = fixed_point32::get_raw_value(smaller_number1); - assert!(val1 == 4294967296, 0); // 0x1.00000000 + let val1 = smaller_number1.get_raw_value(); + assert!(val1 == 4294967296, 1); // 0x1.00000000 let smaller_number2 = fixed_point32::min(two, one); - let val2 = fixed_point32::get_raw_value(smaller_number2); - assert!(val2 == 4294967296, 0); // 0x1.00000000 + let val2 = smaller_number2.get_raw_value(); + assert!(val2 == 4294967296, 2); // 0x1.00000000 } #[test] @@ -125,24 +125,24 @@ module std::fixed_point32_tests { let two = fixed_point32::create_from_rational(2, 1); let larger_number1 = fixed_point32::max(one, two); let larger_number2 = fixed_point32::max(two, one); - let val1 = fixed_point32::get_raw_value(larger_number1); - assert!(val1 == 8589934592, 0); // 0x2.00000000 - let val2 = fixed_point32::get_raw_value(larger_number2); - assert!(val2 == 8589934592, 0); // 0x2.00000000 + let val1 = larger_number1.get_raw_value(); + assert!(val1 == 8589934592, 1); // 0x2.00000000 + let val2 = larger_number2.get_raw_value(); + assert!(val2 == 8589934592, 2); // 0x2.00000000 } #[test] fun floor_can_return_the_correct_number_zero() { let point_five = fixed_point32::create_from_rational(1, 2); - let val = fixed_point32::floor(point_five); - assert!(val == 0, 0); + let val = point_five.floor(); + assert!(val == 0); } #[test] fun create_from_u64_create_correct_fixed_point_number() { let one = fixed_point32::create_from_u64(1); - let val = fixed_point32::get_raw_value(one); - assert!(val == 4294967296, 0); + let val = one.get_raw_value(); + assert!(val == 4294967296); } #[test] @@ -154,35 +154,35 @@ module std::fixed_point32_tests { #[test] fun floor_can_return_the_correct_number_one() { let three_point_five = fixed_point32::create_from_rational(7, 2); // 3.5 - let val = fixed_point32::floor(three_point_five); - assert!(val == 3, 0); + let val = three_point_five.floor(); + assert!(val == 3); } #[test] fun ceil_can_round_up_correctly() { let point_five = fixed_point32::create_from_rational(1, 2); // 0.5 - let val = fixed_point32::ceil(point_five); - assert!(val == 1, 0); + let val = point_five.ceil(); + assert!(val == 1); } #[test] fun ceil_will_not_change_if_number_already_integer() { let one = fixed_point32::create_from_rational(1, 1); // 0.5 - let val = fixed_point32::ceil(one); - assert!(val == 1, 0); + let val = one.ceil(); + assert!(val == 1); } #[test] fun round_can_round_up_correctly() { let point_five = fixed_point32::create_from_rational(1, 2); // 0.5 - let val = fixed_point32::round(point_five); - assert!(val == 1, 0); + let val = point_five.round(); + assert!(val == 1); } #[test] fun round_can_round_down_correctly() { let num = fixed_point32::create_from_rational(499, 1000); // 0.499 - let val = fixed_point32::round(num); - assert!(val == 0, 0); + let val = num.round(); + assert!(val == 0); } } diff --git a/aptos-move/framework/move-stdlib/tests/mem_tests.move b/aptos-move/framework/move-stdlib/tests/mem_tests.move new file mode 100644 index 0000000000000..5e8595c00185d --- /dev/null +++ 
b/aptos-move/framework/move-stdlib/tests/mem_tests.move @@ -0,0 +1,96 @@ +#[test_only] +module std::mem_tests { + use std::mem::{swap, replace}; + + #[test] + fun test_swap_ints() { + let a = 1; + let b = 2; + let v = vector[3, 4, 5, 6]; + + swap(&mut a, &mut b); + assert!(a == 2, 0); + assert!(b == 1, 1); + + swap(&mut a, &mut v[0]); + assert!(a == 3, 0); + assert!(v[0] == 2, 1); + + swap(&mut v[2], &mut a); + assert!(a == 5, 0); + assert!(v[2] == 3, 1); + } + + #[test] + fun test_replace_ints() { + let a = 1; + let b = 2; + + assert!(replace(&mut a, b) == 1, 0); + assert!(a == 2, 1); + } + + #[test_only] + struct SomeStruct has drop, key { + f: u64, + v: vector, + } + + #[test] + fun test_swap_struct() { + let a = 1; + let v = vector[20, 21]; + let s1 = SomeStruct { f: 2, v: vector[3, 4] }; + let s2 = SomeStruct { f: 5, v: vector[6, 7] }; + let vs = vector[SomeStruct { f: 8, v: vector[9, 10] }, SomeStruct { f: 11, v: vector[12, 13] }]; + + swap(&mut s1, &mut s2); + assert!(&s1 == &SomeStruct { f: 5, v: vector[6, 7] }, 0); + assert!(&s2 == &SomeStruct { f: 2, v: vector[3, 4] }, 1); + + swap(&mut s1.f, &mut a); + assert!(s1.f == 1, 2); + assert!(a == 5, 3); + + swap(&mut s1.f, &mut s1.v[0]); + assert!(s1.f == 6, 4); + assert!(s1.v[0] == 1, 5); + + swap(&mut s2, &mut vs[0]); + assert!(&s2 == &SomeStruct { f: 8, v: vector[9, 10] }, 6); + assert!(&vs[0] == &SomeStruct { f: 2, v: vector[3, 4] }, 7); + + swap(&mut s1.f, &mut v[0]); + assert!(s1.f == 20, 8); + assert!(v[0] == 6, 9); + } + + #[test(creator = @0xcafe)] + fun test_swap_resource(creator: &signer) acquires SomeStruct { + use std::signer; + { + move_to(creator, SomeStruct { f: 5, v: vector[6, 7] }); + }; + + { + let value = &mut SomeStruct[signer::address_of(creator)]; + let s1 = SomeStruct { f: 2, v: vector[3, 4] }; + let vs = vector[SomeStruct { f: 8, v: vector[9, 10] }, SomeStruct { f: 11, v: vector[12, 13] }]; + + swap(&mut s1, value); + assert!(&s1 == &SomeStruct { f: 5, v: vector[6, 7] }, 0); + assert!(value == &SomeStruct { f: 2, v: vector[3, 4] }, 1); + + swap(value, &mut vs[0]); + assert!(value == &SomeStruct { f: 8, v: vector[9, 10] }, 2); + assert!(&vs[0] == &SomeStruct { f: 2, v: vector[3, 4] }, 3); + + let v_ref = &mut value.v; + let other_v = vector[11, 12]; + swap(v_ref, &mut other_v); + + assert!(v_ref == &vector[11, 12], 4); + assert!(&other_v == &vector[9, 10], 5); + } + } +} diff --git a/aptos-move/framework/move-stdlib/tests/option_tests.move b/aptos-move/framework/move-stdlib/tests/option_tests.move index 3cf0b9274d3ce..c702ec9eb1b2c 100644 --- a/aptos-move/framework/move-stdlib/tests/option_tests.move +++ b/aptos-move/framework/move-stdlib/tests/option_tests.move @@ -1,20 +1,19 @@ #[test_only] module std::option_tests { use std::option; - use std::vector; #[test] fun option_none_is_none() { let none = option::none(); - assert!(option::is_none(&none), 0); - assert!(!option::is_some(&none), 1); + assert!(none.is_none(), 0); + assert!(!none.is_some(), 1); } #[test] fun option_some_is_some() { let some = option::some(5); - assert!(!option::is_none(&some), 0); - assert!(option::is_some(&some), 1); + assert!(!some.is_none(), 0); + assert!(some.is_some(), 1); } #[test] @@ -22,210 +21,208 @@ module std::option_tests { let none = option::none(); let some = option::some(5); let some_other = option::some(6); - assert!(option::contains(&some, &5), 0); - assert!(option::contains(&some_other, &6), 1); - assert!(!option::contains(&none, &5), 2); - assert!(!option::contains(&some_other, &5), 3); + assert!(some.contains(&5), 0); 
+ assert!(some_other.contains(&6), 1); + assert!(!none.contains(&5), 2); + assert!(!some_other.contains(&5), 3); } #[test] fun option_borrow_some() { let some = option::some(5); let some_other = option::some(6); - assert!(*option::borrow(&some) == 5, 3); - assert!(*option::borrow(&some_other) == 6, 4); + assert!(*some.borrow() == 5, 3); + assert!(*some_other.borrow() == 6, 4); } #[test] #[expected_failure(abort_code = option::EOPTION_NOT_SET)] fun option_borrow_none() { - option::borrow(&option::none()); + option::none().borrow(); } #[test] fun borrow_mut_some() { let some = option::some(1); - let ref = option::borrow_mut(&mut some); + let ref = some.borrow_mut(); *ref = 10; - assert!(*option::borrow(&some) == 10, 0); + assert!(*some.borrow() == 10, 0); } #[test] #[expected_failure(abort_code = option::EOPTION_NOT_SET)] fun borrow_mut_none() { - option::borrow_mut(&mut option::none()); + option::none().borrow_mut(); } #[test] fun borrow_with_default() { let none = option::none(); let some = option::some(5); - assert!(*option::borrow_with_default(&some, &7) == 5, 0); - assert!(*option::borrow_with_default(&none, &7) == 7, 1); + assert!(*some.borrow_with_default(&7) == 5, 0); + assert!(*none.borrow_with_default(&7) == 7, 1); } #[test] fun get_with_default() { let none = option::none(); let some = option::some(5); - assert!(option::get_with_default(&some, 7) == 5, 0); - assert!(option::get_with_default(&none, 7) == 7, 1); + assert!(some.get_with_default(7) == 5, 0); + assert!(none.get_with_default(7) == 7, 1); } #[test] fun extract_some() { let opt = option::some(1); - assert!(option::extract(&mut opt) == 1, 0); - assert!(option::is_none(&opt), 1); + assert!(opt.extract() == 1, 0); + assert!(opt.is_none(), 1); } #[test] #[expected_failure(abort_code = option::EOPTION_NOT_SET)] fun extract_none() { - option::extract(&mut option::none()); + option::none().extract(); } #[test] fun swap_some() { let some = option::some(5); - assert!(option::swap(&mut some, 1) == 5, 0); - assert!(*option::borrow(&some) == 1, 1); + assert!(some.swap(1) == 5, 0); + assert!(*some.borrow() == 1, 1); } #[test] fun swap_or_fill_some() { let some = option::some(5); - assert!(option::swap_or_fill(&mut some, 1) == option::some(5), 0); - assert!(*option::borrow(&some) == 1, 1); + assert!(some.swap_or_fill(1) == option::some(5), 0); + assert!(*some.borrow() == 1, 1); } #[test] fun swap_or_fill_none() { let none = option::none(); - assert!(option::swap_or_fill(&mut none, 1) == option::none(), 0); - assert!(*option::borrow(&none) == 1, 1); + assert!(none.swap_or_fill(1) == option::none(), 0); + assert!(*none.borrow() == 1, 1); } #[test] #[expected_failure(abort_code = option::EOPTION_NOT_SET)] fun swap_none() { - option::swap(&mut option::none(), 1); + option::none().swap(1); } #[test] fun fill_none() { let none = option::none(); - option::fill(&mut none, 3); - assert!(option::is_some(&none), 0); - assert!(*option::borrow(&none) == 3, 1); + none.fill(3); + assert!(none.is_some(), 0); + assert!(*none.borrow() == 3, 1); } #[test] #[expected_failure(abort_code = option::EOPTION_IS_SET)] fun fill_some() { - option::fill(&mut option::some(3), 0); + option::some(3).fill(0); } #[test] fun destroy_with_default() { - assert!(option::destroy_with_default(option::none(), 4) == 4, 0); - assert!(option::destroy_with_default(option::some(4), 5) == 4, 1); + assert!(option::none().destroy_with_default(4) == 4, 0); + assert!(option::some(4).destroy_with_default(5) == 4, 1); } #[test] fun destroy_some() { - 
assert!(option::destroy_some(option::some(4)) == 4, 0); + assert!(option::some(4).destroy_some() == 4, 0); } #[test] #[expected_failure(abort_code = option::EOPTION_NOT_SET)] fun destroy_some_none() { - option::destroy_some(option::none()); + option::none().destroy_some(); } #[test] fun destroy_none() { - option::destroy_none(option::none()); + option::none().destroy_none(); } #[test] #[expected_failure(abort_code = option::EOPTION_IS_SET)] fun destroy_none_some() { - option::destroy_none(option::some(0)); + option::some(0).destroy_none(); } #[test] fun into_vec_some() { - let v = option::to_vec(option::some(0)); - assert!(vector::length(&v) == 1, 0); - let x = vector::pop_back(&mut v); + let v = option::some(0).to_vec(); + assert!(v.length() == 1, 0); + let x = v.pop_back(); assert!(x == 0, 1); } #[test] fun into_vec_none() { - let v: vector = option::to_vec(option::none()); - assert!(vector::is_empty(&v), 0); + let v: vector = option::none().to_vec(); + assert!(v.is_empty(), 0); } #[test] fun test_for_each() { let r = 0; - option::for_each(option::some(1), |x| r = x); + option::some(1).for_each(|x| r = x); assert!(r == 1, 0); r = 0; - option::for_each(option::none(), |x| r = x); + option::none().for_each(|x| r = x); assert!(r == 0, 1); } #[test] fun test_for_each_ref() { let r = 0; - option::for_each_ref(&option::some(1), |x| r = *x); + option::some(1).for_each_ref(|x| r = *x); assert!(r == 1, 0); r = 0; - option::for_each_ref(&option::none(), |x| r = *x); + option::none().for_each_ref(|x| r = *x); assert!(r == 0, 1); } #[test] fun test_for_each_mut() { let o = option::some(0); - option::for_each_mut(&mut o, |x| *x = 1); + o.for_each_mut(|x| *x = 1); assert!(o == option::some(1), 0); } #[test] fun test_fold() { - let r = option::fold(option::some(1), 1, |a, b| a + b); + let r = option::some(1).fold(1, |a, b| a + b); assert!(r == 2, 0); - let r = option::fold(option::none(), 1, |a, b| a + b); + let r = option::none().fold(1, |a, b| a + b); assert!(r == 1, 0); } #[test] fun test_map() { - let x = option::map(option::some(1), |e| e + 1); - assert!(option::extract(&mut x) == 2, 0); + let x = option::some(1).map(|e| e + 1); + assert!(x.extract() == 2, 0); } #[test] fun test_map_ref() { - let x = option::map_ref(&option::some(1), |e| *e + 1); - assert!(option::extract(&mut x) == 2, 0); + let x = option::some(1).map_ref(|e| *e + 1); + assert!(x.extract() == 2, 0); } #[test] fun test_filter() { - let x = option::filter(option::some(1), |e| *e != 1); - assert!(option::is_none(&x), 0); + let x = option::some(1).filter(|e| *e != 1); + assert!(x.is_none(), 0); } #[test] fun test_any() { - let r = option::any(&option::some(1), |e| *e == 1); + let r = option::some(1).any(|e| *e == 1); assert!(r, 0); } - - } diff --git a/aptos-move/framework/move-stdlib/tests/string_tests.move b/aptos-move/framework/move-stdlib/tests/string_tests.move index e7810a3422262..0d43a38c4345f 100644 --- a/aptos-move/framework/move-stdlib/tests/string_tests.move +++ b/aptos-move/framework/move-stdlib/tests/string_tests.move @@ -6,7 +6,7 @@ module std::string_tests { fun test_valid_utf8() { let sparkle_heart = vector[240, 159, 146, 150]; let s = string::utf8(sparkle_heart); - assert!(string::length(&s) == 4, 22); + assert!(s.length() == 4, 22); } #[test] @@ -14,13 +14,13 @@ module std::string_tests { fun test_invalid_utf8() { let no_sparkle_heart = vector[0, 159, 146, 150]; let s = string::utf8(no_sparkle_heart); - assert!(string::length(&s) == 1, 22); + assert!(s.length() == 1, 22); } #[test] fun test_sub_string() { let s = 
string::utf8(b"abcd"); - let sub = string::sub_string(&s, 2, 4); + let sub = s.sub_string(2, 4); assert!(sub == string::utf8(b"cd"), 22) } @@ -29,28 +29,28 @@ module std::string_tests { fun test_sub_string_invalid_boundary() { let sparkle_heart = vector[240, 159, 146, 150]; let s = string::utf8(sparkle_heart); - let _sub = string::sub_string(&s, 1, 4); + let _sub = s.sub_string(1, 4); } #[test] #[expected_failure(abort_code = string::EINVALID_INDEX)] fun test_sub_string_invalid_index() { let s = string::utf8(b"abcd"); - let _sub = string::sub_string(&s, 4, 5); + let _sub = s.sub_string(4, 5); } #[test] fun test_sub_string_empty() { let s = string::utf8(b"abcd"); - let sub = string::sub_string(&s, 4, 4); - assert!(string::is_empty(&sub), 22) + let sub = s.sub_string(4, 4); + assert!(sub.is_empty(), 22) } #[test] fun test_index_of() { let s = string::utf8(b"abcd"); let r = string::utf8(b"bc"); - let p = string::index_of(&s, &r); + let p = s.index_of(&r); assert!(p == 1, 22) } @@ -58,21 +58,21 @@ module std::string_tests { fun test_index_of_fail() { let s = string::utf8(b"abcd"); let r = string::utf8(b"bce"); - let p = string::index_of(&s, &r); + let p = s.index_of(&r); assert!(p == 4, 22) } #[test] fun test_append() { let s = string::utf8(b"abcd"); - string::append(&mut s, string::utf8(b"ef")); + s.append(string::utf8(b"ef")); assert!(s == string::utf8(b"abcdef"), 22) } #[test] fun test_insert() { let s = string::utf8(b"abcd"); - string::insert(&mut s, 1, string::utf8(b"xy")); + s.insert(1, string::utf8(b"xy")); assert!(s == string::utf8(b"axybcd"), 22) } } diff --git a/aptos-move/framework/move-stdlib/tests/vector_tests.move b/aptos-move/framework/move-stdlib/tests/vector_tests.move index b8c9e19a4dd84..979430812065f 100644 --- a/aptos-move/framework/move-stdlib/tests/vector_tests.move +++ b/aptos-move/framework/move-stdlib/tests/vector_tests.move @@ -3,90 +3,86 @@ module std::vector_tests { use std::vector as V; use std::vector; - struct R has store { } + struct R has store {} + struct Droppable has drop {} + struct NotDroppable {} #[test] fun test_singleton_contains() { - assert!(*V::borrow(&V::singleton(0), 0) == 0, 0); - assert!(*V::borrow(&V::singleton(true), 0) == true, 0); - assert!(*V::borrow(&V::singleton(@0x1), 0) == @0x1, 0); + assert!(V::singleton(0)[0] == 0, 0); + assert!(V::singleton(true)[0] == true, 0); + assert!(V::singleton(@0x1)[0] == @0x1, 0); } #[test] fun test_singleton_len() { - assert!(V::length(&V::singleton(0)) == 1, 0); - assert!(V::length(&V::singleton(true)) == 1, 0); - assert!(V::length(&V::singleton(@0x1)) == 1, 0); + assert!(V::singleton(0).length() == 1, 0); + assert!(V::singleton(true).length() == 1, 0); + assert!(V::singleton(@0x1).length() == 1, 0); } #[test] fun test_empty_is_empty() { - assert!(V::is_empty(&V::empty()), 0); + assert!(V::empty().is_empty(), 0); } #[test] fun append_empties_is_empty() { let v1 = V::empty(); let v2 = V::empty(); - V::append(&mut v1, v2); - assert!(V::is_empty(&v1), 0); + v1.append(v2); + assert!(v1.is_empty(), 0); } #[test] fun append_respects_order_empty_lhs() { let v1 = V::empty(); let v2 = V::empty(); - V::push_back(&mut v2, 0); - V::push_back(&mut v2, 1); - V::push_back(&mut v2, 2); - V::push_back(&mut v2, 3); - V::append(&mut v1, v2); - assert!(!V::is_empty(&v1), 0); - assert!(V::length(&v1) == 4, 1); - assert!(*V::borrow(&v1, 0) == 0, 2); - assert!(*V::borrow(&v1, 1) == 1, 3); - assert!(*V::borrow(&v1, 2) == 2, 4); - assert!(*V::borrow(&v1, 3) == 3, 5); + for (i in 0..4) { + v2.push_back(i) + }; + v1.append(v2); + 
assert!(!v1.is_empty(), 0); + assert!(v1.length() == 4, 1); + assert!(v1[0] == 0, 2); + assert!(v1[1] == 1, 3); + assert!(v1[2] == 2, 4); + assert!(v1[3] == 3, 5); } #[test] fun append_respects_order_empty_rhs() { let v1 = V::empty(); let v2 = V::empty(); - V::push_back(&mut v1, 0); - V::push_back(&mut v1, 1); - V::push_back(&mut v1, 2); - V::push_back(&mut v1, 3); - V::append(&mut v1, v2); - assert!(!V::is_empty(&v1), 0); - assert!(V::length(&v1) == 4, 1); - assert!(*V::borrow(&v1, 0) == 0, 2); - assert!(*V::borrow(&v1, 1) == 1, 3); - assert!(*V::borrow(&v1, 2) == 2, 4); - assert!(*V::borrow(&v1, 3) == 3, 5); + for (i in 0..4) { + v1.push_back(i) + }; + v1.append(v2); + assert!(!v1.is_empty(), 0); + assert!(v1.length() == 4, 1); + assert!(v1[0] == 0, 2); + assert!(v1[1] == 1, 3); + assert!(v1[2] == 2, 4); + assert!(v1[3] == 3, 5); } #[test] fun append_respects_order_nonempty_rhs_lhs() { let v1 = V::empty(); let v2 = V::empty(); - V::push_back(&mut v1, 0); - V::push_back(&mut v1, 1); - V::push_back(&mut v1, 2); - V::push_back(&mut v1, 3); - V::push_back(&mut v2, 4); - V::push_back(&mut v2, 5); - V::push_back(&mut v2, 6); - V::push_back(&mut v2, 7); - V::append(&mut v1, v2); - assert!(!V::is_empty(&v1), 0); - assert!(V::length(&v1) == 8, 1); - let i = 0; - while (i < 8) { - assert!(*V::borrow(&v1, i) == i, i); - i = i + 1; + for (i in 0..4) { + v1.push_back(i) + }; + for (i in 4..8) { + v2.push_back(i) + }; + v1.append(v2); + assert!(!v1.is_empty(), 0); + assert!(v1.length() == 8, 1); + for (i in 0..8) { + assert!(v1[i] == i, i); } } @@ -94,410 +90,414 @@ module std::vector_tests { fun test_trim() { { let v = V::empty(); - assert!(&V::trim(&mut v, 0) == &vector[], 0); + assert!(&v.trim(0) == &vector[], 0); }; { let v = vector[1]; - assert!(&V::trim(&mut v, 1) == &vector[], 1); - assert!(&V::trim(&mut v, 0) == &vector[1], 2); + assert!(&v.trim(1) == &vector[], 1); + assert!(&v.trim(0) == &vector[1], 2); }; { let v = vector[1, 2]; - assert!(&V::trim(&mut v, 0) == &vector[1, 2], 3); + assert!(&v.trim(0) == &vector[1, 2], 3); + }; + { + let v = vector[1, 2, 3, 4, 5, 6]; + let other = v.trim(4); + assert!(v == vector[1, 2, 3, 4], 4); + assert!(other == vector[5, 6], 5); + + let other_empty = v.trim(4); + assert!(v == vector[1, 2, 3, 4], 6); + assert!(other_empty == vector[], 7); }; } + #[test] #[expected_failure(abort_code = V::EINDEX_OUT_OF_BOUNDS)] fun test_trim_fail() { let v = vector[1]; - V::trim(&mut v, 2); + v.trim(2); + } + + #[test] + #[expected_failure(abort_code = V::EINDEX_OUT_OF_BOUNDS)] + fun test_trim_fail_2() { + let v = vector[1, 2, 3]; + v.trim(4); } #[test] #[expected_failure(vector_error, minor_status = 1, location = Self)] fun borrow_out_of_range() { - let v = V::empty(); - V::push_back(&mut v, 7); - V::borrow(&v, 1); + let v = vector[7]; + v.borrow(1); } #[test] fun vector_contains() { let vec = V::empty(); - assert!(!V::contains(&vec, &0), 1); + assert!(!vec.contains(&0), 1); - V::push_back(&mut vec, 0); - assert!(V::contains(&vec, &0), 2); - assert!(!V::contains(&vec, &1), 3); + vec.push_back(0); + assert!(vec.contains(&0), 2); + assert!(!vec.contains(&1), 3); - V::push_back(&mut vec, 1); - assert!(V::contains(&vec, &0), 4); - assert!(V::contains(&vec, &1), 5); - assert!(!V::contains(&vec, &2), 6); + vec.push_back(1); + assert!(vec.contains(&0), 4); + assert!(vec.contains(&1), 5); + assert!(!vec.contains(&2), 6); - V::push_back(&mut vec, 2); - assert!(V::contains(&vec, &0), 7); - assert!(V::contains(&vec, &1), 8); - assert!(V::contains(&vec, &2), 9); - 
assert!(!V::contains(&vec, &3), 10); + vec.push_back(2); + assert!(vec.contains(&0), 7); + assert!(vec.contains(&1), 8); + assert!(vec.contains(&2), 9); + assert!(!vec.contains(&3), 10); } #[test] fun destroy_empty() { - V::destroy_empty(V::empty()); - V::destroy_empty(V::empty()); + V::empty().destroy_empty(); + V::empty().destroy_empty(); } #[test] fun destroy_empty_with_pops() { - let v = V::empty(); - V::push_back(&mut v, 42); - V::pop_back(&mut v); - V::destroy_empty(v); + let v = vector[42]; + v.pop_back(); + v.destroy_empty(); } #[test] #[expected_failure(vector_error, minor_status = 3, location = Self)] fun destroy_non_empty() { - let v = V::empty(); - V::push_back(&mut v, 42); - V::destroy_empty(v); + let v = vector[42]; + v.destroy_empty(); } #[test] fun get_set_work() { - let vec = V::empty(); - V::push_back(&mut vec, 0); - V::push_back(&mut vec, 1); - assert!(*V::borrow(&vec, 1) == 1, 0); - assert!(*V::borrow(&vec, 0) == 0, 1); + let vec = vector[0, 1]; + assert!(vec[1] == 1, 0); + assert!(vec[0] == 0, 1); - *V::borrow_mut(&mut vec, 0) = 17; - assert!(*V::borrow(&vec, 1) == 1, 0); - assert!(*V::borrow(&vec, 0) == 17, 0); + vec[0] = 17; + assert!(vec[1] == 1, 0); + assert!(vec[0] == 17, 0); } #[test] #[expected_failure(vector_error, minor_status = 2, location = Self)] fun pop_out_of_range() { let v = V::empty(); - V::pop_back(&mut v); + v.pop_back(); } #[test] fun swap_different_indices() { - let vec = V::empty(); - V::push_back(&mut vec, 0); - V::push_back(&mut vec, 1); - V::push_back(&mut vec, 2); - V::push_back(&mut vec, 3); - V::swap(&mut vec, 0, 3); - V::swap(&mut vec, 1, 2); - assert!(*V::borrow(&vec, 0) == 3, 0); - assert!(*V::borrow(&vec, 1) == 2, 0); - assert!(*V::borrow(&vec, 2) == 1, 0); - assert!(*V::borrow(&vec, 3) == 0, 0); + let vec = vector[0, 1, 2, 3]; + vec.swap(0, 3); + vec.swap(1, 2); + assert!(vec[0] == 3); + assert!(vec[1] == 2); + assert!(vec[2] == 1); + assert!(vec[3] == 0); } #[test] fun swap_same_index() { - let vec = V::empty(); - V::push_back(&mut vec, 0); - V::push_back(&mut vec, 1); - V::push_back(&mut vec, 2); - V::push_back(&mut vec, 3); - V::swap(&mut vec, 1, 1); - assert!(*V::borrow(&vec, 0) == 0, 0); - assert!(*V::borrow(&vec, 1) == 1, 0); - assert!(*V::borrow(&vec, 2) == 2, 0); - assert!(*V::borrow(&vec, 3) == 3, 0); + let vec = vector[0, 1, 2, 3]; + vec.swap(1, 1); + assert!(vec[0] == 0, 0); + assert!(vec[1] == 1, 0); + assert!(vec[2] == 2, 0); + assert!(vec[3] == 3, 0); } #[test] fun remove_singleton_vector() { - let v = V::empty(); - V::push_back(&mut v, 0); - assert!(V::remove(&mut v, 0) == 0, 0); - assert!(V::length(&v) == 0, 0); + let v = V::singleton(0); + assert!(v.remove(0) == 0, 0); + assert!(v.length() == 0, 0); } #[test] fun remove_nonsingleton_vector() { - let v = V::empty(); - V::push_back(&mut v, 0); - V::push_back(&mut v, 1); - V::push_back(&mut v, 2); - V::push_back(&mut v, 3); + let v = vector[0, 1, 2, 3]; - assert!(V::remove(&mut v, 1) == 1, 0); - assert!(V::length(&v) == 3, 0); - assert!(*V::borrow(&v, 0) == 0, 0); - assert!(*V::borrow(&v, 1) == 2, 0); - assert!(*V::borrow(&v, 2) == 3, 0); + assert!(v.remove(1) == 1, 0); + assert!(v.length() == 3, 0); + assert!(v[0] == 0, 0); + assert!(v[1] == 2, 0); + assert!(v[2] == 3, 0); } #[test] fun remove_nonsingleton_vector_last_elem() { - let v = V::empty(); - V::push_back(&mut v, 0); - V::push_back(&mut v, 1); - V::push_back(&mut v, 2); - V::push_back(&mut v, 3); + let v = vector[0, 1, 2, 3]; - assert!(V::remove(&mut v, 3) == 3, 0); - assert!(V::length(&v) == 3, 0); - 
assert!(*V::borrow(&v, 0) == 0, 0); - assert!(*V::borrow(&v, 1) == 1, 0); - assert!(*V::borrow(&v, 2) == 2, 0); + assert!(v.remove(3) == 3, 0); + assert!(v.length() == 3, 0); + assert!(v[0] == 0, 0); + assert!(v[1] == 1, 0); + assert!(v[2] == 2, 0); } #[test] #[expected_failure(abort_code = V::EINDEX_OUT_OF_BOUNDS)] fun remove_empty_vector() { let v = V::empty(); - V::remove(&mut v, 0); + v.remove(0); } #[test] #[expected_failure(abort_code = V::EINDEX_OUT_OF_BOUNDS)] fun remove_out_of_bound_index() { - let v = V::empty(); - V::push_back(&mut v, 0); - V::remove(&mut v, 1); + let v = vector[0]; + v.remove(1); + } + + fun remove_more_cases() { + let v: vector = vector[1]; + assert!(v.remove(0) == 1, 1); + assert!(&v == &vector[], 1); + + let v: vector = vector[2, 1]; + assert!(v.remove(0) == 2, 1); + assert!(&v == &vector[1], 1); + + let v: vector = vector[1, 2]; + assert!(v.remove(1) == 2, 1); + assert!(&v == &vector[1], 1); + + let v: vector = vector[3, 1, 2]; + assert!(v.remove(0) == 3, 1); + assert!(&v == &vector[1, 2], 1); + + let v: vector = vector[1, 3, 2]; + assert!(v.remove(1) == 3, 1); + assert!(&v == &vector[1, 2], 1); + + let v: vector = vector[1, 2, 3]; + assert!(v.remove(2) == 3, 1); + assert!(&v == &vector[1, 2], 1); + + let v: vector = vector[4, 1, 2, 3]; + assert!(v.remove(0) == 4, 1); + assert!(&v == &vector[1, 2, 3], 1); + + let v: vector = vector[5, 1, 2, 3, 4]; + assert!(v.remove(0) == 5, 1); + assert!(&v == &vector[1, 2, 3, 4], 1); + + let v: vector = vector[1, 5, 2, 3, 4]; + assert!(v.remove(1) == 5, 1); + assert!(&v == &vector[1, 2, 3, 4], 1); + + let v: vector = vector[1, 2, 5, 3, 4]; + assert!(v.remove(2) == 5, 1); + assert!(&v == &vector[1, 2, 3, 4], 1); + + let v: vector = vector[1, 2, 3, 4, 5]; + assert!(v.remove(4) == 5, 1); + assert!(&v == &vector[1, 2, 3, 4], 1); } #[test] fun remove_value_singleton_vector() { - let v = V::empty(); - V::push_back(&mut v, 0); - assert!(V::borrow(&V::remove_value(&mut v, &0), 0) == &0, 0); - assert!(V::length(&v) == 0, 0); + let v = vector[0]; + assert!(v.remove_value(&0)[0] == 0, 0); + assert!(v.length() == 0, 0); } #[test] fun remove_value_nonsingleton_vector() { - let v = V::empty(); - V::push_back(&mut v, 0); - V::push_back(&mut v, 1); - V::push_back(&mut v, 2); - V::push_back(&mut v, 3); + let v = vector[0, 1, 2, 3]; - assert!(V::borrow(&V::remove_value(&mut v, &2), 0) == &2, 0); - assert!(V::length(&v) == 3, 0); - assert!(*V::borrow(&v, 0) == 0, 0); - assert!(*V::borrow(&v, 1) == 1, 0); - assert!(*V::borrow(&v, 2) == 3, 0); + assert!(v.remove_value(&2)[0] == 2, 0); + assert!(v.length() == 3, 0); + assert!(v[0] == 0, 0); + assert!(v[1] == 1, 0); + assert!(v[2] == 3, 0); } #[test] fun remove_value_nonsingleton_vector_last_elem() { - let v = V::empty(); - V::push_back(&mut v, 0); - V::push_back(&mut v, 1); - V::push_back(&mut v, 2); - V::push_back(&mut v, 3); + let v = vector[0, 1, 2, 3]; - assert!(V::borrow(&V::remove_value(&mut v, &3), 0) == &3, 0); - assert!(V::length(&v) == 3, 0); - assert!(*V::borrow(&v, 0) == 0, 0); - assert!(*V::borrow(&v, 1) == 1, 0); - assert!(*V::borrow(&v, 2) == 2, 0); + assert!(v.remove_value(&3)[0] == 3, 0); + assert!(v.length() == 3, 0); + assert!(v[0] == 0, 0); + assert!(v[1] == 1, 0); + assert!(v[2] == 2, 0); } #[test] fun remove_value_empty_vector() { let v = V::empty(); - assert!(V::length(&V::remove_value(&mut v, &1)) == 0, 0); - assert!(V::length(&v) == 0, 1); + assert!(v.remove_value(&1).length() == 0, 0); + assert!(v.length() == 0, 1); } #[test] fun remove_value_nonexistent() { - let v = 
V::empty(); - V::push_back(&mut v, 0); - assert!(V::length(&V::remove_value(&mut v, &1)) == 0, 0); - assert!(V::length(&v) == 1, 1); + let v = vector[0]; + assert!(v.remove_value(&1).length() == 0, 0); + assert!(v.length() == 1, 1); } #[test] fun reverse_vector_empty() { let v = V::empty(); - let is_empty = V::is_empty(&v); - V::reverse(&mut v); - assert!(is_empty == V::is_empty(&v), 0); + let is_empty = v.is_empty(); + v.reverse(); + assert!(is_empty == v.is_empty(), 0); } #[test] fun reverse_singleton_vector() { - let v = V::empty(); - V::push_back(&mut v, 0); - assert!(*V::borrow(&v, 0) == 0, 1); - V::reverse(&mut v); - assert!(*V::borrow(&v, 0) == 0, 2); + let v = V::singleton(0); + assert!(v[0] == 0, 1); + v.reverse(); + assert!(v[0] == 0, 2); } #[test] fun reverse_vector_nonempty_even_length() { - let v = V::empty(); - V::push_back(&mut v, 0); - V::push_back(&mut v, 1); - V::push_back(&mut v, 2); - V::push_back(&mut v, 3); + let v = vector[0, 1, 2, 3]; - assert!(*V::borrow(&v, 0) == 0, 1); - assert!(*V::borrow(&v, 1) == 1, 2); - assert!(*V::borrow(&v, 2) == 2, 3); - assert!(*V::borrow(&v, 3) == 3, 4); + assert!(v[0] == 0, 1); + assert!(v[1] == 1, 2); + assert!(v[2] == 2, 3); + assert!(v[3] == 3, 4); - V::reverse(&mut v); + v.reverse(); - assert!(*V::borrow(&v, 3) == 0, 5); - assert!(*V::borrow(&v, 2) == 1, 6); - assert!(*V::borrow(&v, 1) == 2, 7); - assert!(*V::borrow(&v, 0) == 3, 8); + assert!(v[3] == 0, 5); + assert!(v[2] == 1, 6); + assert!(v[1] == 2, 7); + assert!(v[0] == 3, 8); } #[test] fun reverse_vector_nonempty_odd_length_non_singleton() { - let v = V::empty(); - V::push_back(&mut v, 0); - V::push_back(&mut v, 1); - V::push_back(&mut v, 2); + let v = vector[0, 1, 2]; - assert!(*V::borrow(&v, 0) == 0, 1); - assert!(*V::borrow(&v, 1) == 1, 2); - assert!(*V::borrow(&v, 2) == 2, 3); + assert!(v[0] == 0, 1); + assert!(v[1] == 1, 2); + assert!(v[2] == 2, 3); - V::reverse(&mut v); + v.reverse(); - assert!(*V::borrow(&v, 2) == 0, 4); - assert!(*V::borrow(&v, 1) == 1, 5); - assert!(*V::borrow(&v, 0) == 2, 6); + assert!(v[2] == 0, 4); + assert!(v[1] == 1, 5); + assert!(v[0] == 2, 6); } #[test] #[expected_failure(vector_error, minor_status = 1, location = Self)] fun swap_empty() { let v = V::empty(); - V::swap(&mut v, 0, 0); + v.swap(0, 0); } #[test] #[expected_failure(vector_error, minor_status = 1, location = Self)] fun swap_out_of_range() { - let v = V::empty(); - - V::push_back(&mut v, 0); - V::push_back(&mut v, 1); - V::push_back(&mut v, 2); - V::push_back(&mut v, 3); + let v = vector[0, 1, 2, 3]; - V::swap(&mut v, 1, 10); + v.swap(1, 10); } #[test] #[expected_failure(abort_code = V::EINDEX_OUT_OF_BOUNDS)] fun swap_remove_empty() { let v = V::empty(); - V::swap_remove(&mut v, 0); + v.swap_remove(0); } #[test] fun swap_remove_singleton() { - let v = V::empty(); - V::push_back(&mut v, 0); - assert!(V::swap_remove(&mut v, 0) == 0, 0); - assert!(V::is_empty(&v), 1); + let v = vector[0]; + assert!(v.swap_remove(0) == 0, 0); + assert!(v.is_empty(), 1); } #[test] fun swap_remove_inside_vector() { - let v = V::empty(); - V::push_back(&mut v, 0); - V::push_back(&mut v, 1); - V::push_back(&mut v, 2); - V::push_back(&mut v, 3); - - assert!(*V::borrow(&v, 0) == 0, 1); - assert!(*V::borrow(&v, 1) == 1, 2); - assert!(*V::borrow(&v, 2) == 2, 3); - assert!(*V::borrow(&v, 3) == 3, 4); + let v = vector[0, 1, 2, 3]; - assert!(V::swap_remove(&mut v, 1) == 1, 5); - assert!(V::length(&v) == 3, 6); + assert!(v[0] == 0, 1); + assert!(v[1] == 1, 2); + assert!(v[2] == 2, 3); + assert!(v[3] == 3, 4); - 
assert!(*V::borrow(&v, 0) == 0, 7); - assert!(*V::borrow(&v, 1) == 3, 8); - assert!(*V::borrow(&v, 2) == 2, 9); + assert!(v.swap_remove(1) == 1, 5); + assert!(v.length() == 3, 6); + assert!(v[0] == 0, 7); + assert!(v[1] == 3, 8); + assert!(v[2] == 2, 9); } #[test] fun swap_remove_end_of_vector() { - let v = V::empty(); - V::push_back(&mut v, 0); - V::push_back(&mut v, 1); - V::push_back(&mut v, 2); - V::push_back(&mut v, 3); + let v = vector[0, 1, 2, 3]; - assert!(*V::borrow(&v, 0) == 0, 1); - assert!(*V::borrow(&v, 1) == 1, 2); - assert!(*V::borrow(&v, 2) == 2, 3); - assert!(*V::borrow(&v, 3) == 3, 4); + assert!(v[0] == 0, 1); + assert!(v[1] == 1, 2); + assert!(v[2] == 2, 3); + assert!(v[3] == 3, 4); - assert!(V::swap_remove(&mut v, 3) == 3, 5); - assert!(V::length(&v) == 3, 6); + assert!(v.swap_remove(3) == 3, 5); + assert!(v.length() == 3, 6); - assert!(*V::borrow(&v, 0) == 0, 7); - assert!(*V::borrow(&v, 1) == 1, 8); - assert!(*V::borrow(&v, 2) == 2, 9); + assert!(v[0] == 0, 7); + assert!(v[1] == 1, 8); + assert!(v[2] == 2, 9); } #[test] #[expected_failure(vector_error, minor_status = 1, location = std::vector)] fun swap_remove_out_of_range() { - let v = V::empty(); - V::push_back(&mut v, 0); - V::swap_remove(&mut v, 1); + let v = vector[0]; + v.swap_remove(1); } #[test] fun push_back_and_borrow() { let v = V::empty(); - V::push_back(&mut v, 7); - assert!(!V::is_empty(&v), 0); - assert!(V::length(&v) == 1, 1); - assert!(*V::borrow(&v, 0) == 7, 2); + v.push_back(7); + assert!(!v.is_empty(), 0); + assert!(v.length() == 1, 1); + assert!(v[0] == 7, 2); - V::push_back(&mut v, 8); - assert!(V::length(&v) == 2, 3); - assert!(*V::borrow(&v, 0) == 7, 4); - assert!(*V::borrow(&v, 1) == 8, 5); + v.push_back(8); + assert!(v.length() == 2, 3); + assert!(v[0] == 7, 4); + assert!(v[1] == 8, 5); } #[test] fun index_of_empty_not_has() { let v = V::empty(); - let (has, index) = V::index_of(&v, &true); + let (has, index) = v.index_of(&true); assert!(!has, 0); assert!(index == 0, 1); } #[test] fun index_of_nonempty_not_has() { - let v = V::empty(); - V::push_back(&mut v, false); - let (has, index) = V::index_of(&v, &true); + let v = vector[false]; + let (has, index) = v.index_of(&true); assert!(!has, 0); assert!(index == 0, 1); } #[test] fun index_of_nonempty_has() { - let v = V::empty(); - V::push_back(&mut v, false); - V::push_back(&mut v, true); - let (has, index) = V::index_of(&v, &true); + let v = vector[false, true]; + let (has, index) = v.index_of(&true); assert!(has, 0); assert!(index == 1, 1); } @@ -505,11 +505,8 @@ module std::vector_tests { // index_of will return the index first occurence that is equal #[test] fun index_of_nonempty_has_multiple_occurences() { - let v = V::empty(); - V::push_back(&mut v, false); - V::push_back(&mut v, true); - V::push_back(&mut v, true); - let (has, index) = V::index_of(&v, &true); + let v = vector[false, true, true]; + let (has, index) = v.index_of(&true); assert!(has, 0); assert!(index == 1, 1); } @@ -517,40 +514,31 @@ module std::vector_tests { #[test] fun find_empty_not_has() { let v = V::empty(); - let (has, index) = V::find(&v, |_x| true); + let (has, index) = v.find(|_x| true); assert!(!has, 0); assert!(index == 0, 1); } #[test] fun find_nonempty_not_has() { - let v = V::empty(); - V::push_back(&mut v, 1); - V::push_back(&mut v, 2); - let (has, index) = V::find(&v, |x| *x == 3); + let v = vector[1, 2]; + let (has, index) = v.find(|x| *x == 3); assert!(!has, 0); assert!(index == 0, 1); } #[test] fun find_nonempty_has() { - let v = V::empty(); - 
V::push_back(&mut v, 1); - V::push_back(&mut v, 2); - V::push_back(&mut v, 3); - let (has, index) = V::find(&v, |x| *x == 2); + let v = vector[1, 2, 3]; + let (has, index) = v.find(|x| *x == 2); assert!(has, 0); assert!(index == 1, 1); } #[test] fun find_nonempty_has_multiple_occurences() { - let v = V::empty(); - V::push_back(&mut v, 1); - V::push_back(&mut v, 2); - V::push_back(&mut v, 2); - V::push_back(&mut v, 3); - let (has, index) = V::find(&v, |x| *x == 2); + let v = vector[1, 2, 2, 3]; + let (has, index) = v.find(|x| *x == 2); assert!(has, 0); assert!(index == 1, 1); } @@ -558,13 +546,10 @@ module std::vector_tests { #[test] fun length() { let empty = V::empty(); - assert!(V::length(&empty) == 0, 0); - let i = 0; - let max_len = 42; - while (i < max_len) { - V::push_back(&mut empty, i); - assert!(V::length(&empty) == i + 1, i); - i = i + 1; + assert!(empty.length() == 0); + for (i in 0..42) { + empty.push_back(i); + assert!(empty.length() == i + 1, i); } } @@ -575,30 +560,30 @@ module std::vector_tests { let max_len = 42; while (i < max_len) { - V::push_back(&mut v, i); - i = i + 1; + v.push_back(i); + i += 1; }; while (i > 0) { - assert!(V::pop_back(&mut v) == i - 1, i); - i = i - 1; + assert!(v.pop_back() == i - 1, i); + i -= 1; }; } #[test_only] fun test_natives_with_type(x1: T, x2: T): (T, T) { let v = V::empty(); - assert!(V::length(&v) == 0, 0); - V::push_back(&mut v, x1); - assert!(V::length(&v) == 1, 1); - V::push_back(&mut v, x2); - assert!(V::length(&v) == 2, 2); - V::swap(&mut v, 0, 1); - x1 = V::pop_back(&mut v); - assert!(V::length(&v) == 1, 3); - x2 = V::pop_back(&mut v); - assert!(V::length(&v) == 0, 4); - V::destroy_empty(v); + assert!(v.length() == 0, 0); + v.push_back(x1); + assert!(v.length() == 1, 1); + v.push_back(x2); + assert!(v.length() == 2, 2); + v.swap(0, 1); + x1 = v.pop_back(); + assert!(v.length() == 1, 3); + x2 = v.pop_back(); + assert!(v.length() == 0, 4); + v.destroy_empty(); (x1, x2) } @@ -612,7 +597,7 @@ module std::vector_tests { test_natives_with_type>(V::empty(), V::empty()); - test_natives_with_type(Droppable{}, Droppable{}); + test_natives_with_type(Droppable {}, Droppable {}); (NotDroppable {}, NotDroppable {}) = test_natives_with_type( NotDroppable {}, NotDroppable {} @@ -623,10 +608,10 @@ module std::vector_tests { fun test_for_each() { let v = vector[1, 2, 3]; let s = 0; - V::for_each(v, |e| { - s = s + e; + v.for_each(|e| { + s += e; }); - assert!(s == 6, 0) + assert!(s == 6) } #[test] @@ -634,7 +619,7 @@ module std::vector_tests { let v1 = vector[1, 2, 3]; let v2 = vector[10, 20, 30]; let s = 0; - V::zip(v1, v2, |e1, e2| s = s + e1 * e2); + v1.zip(v2, |e1, e2| s += e1 * e2); assert!(s == 140, 0); } @@ -645,7 +630,7 @@ module std::vector_tests { let v1 = vector[1]; let v2 = vector[10, 20]; let s = 0; - V::zip(v1, v2, |e1, e2| s = s + e1 * e2); + v1.zip(v2, |e1, e2| s = s + e1 * e2); } #[test] @@ -653,9 +638,9 @@ module std::vector_tests { let v = vector[1, 2, 3]; let i_s = 0; let s = 0; - V::enumerate_ref(&v, |i, e| { - i_s = i_s + i; - s = s + *e; + v.enumerate_ref(|i, e| { + i_s += i; + s += *e; }); assert!(i_s == 3, 0); assert!(s == 6, 0); @@ -665,7 +650,7 @@ module std::vector_tests { fun test_for_each_ref() { let v = vector[1, 2, 3]; let s = 0; - V::for_each_ref(&v, |e| s = s + *e); + v.for_each_ref(|e| s += *e); assert!(s == 6, 0) } @@ -673,7 +658,10 @@ module std::vector_tests { fun test_for_each_mut() { let v = vector[1, 2, 3]; let s = 2; - V::for_each_mut(&mut v, |e| { *e = s; s = s + 1 }); + v.for_each_mut(|e| { + *e = s; + s 
+= 1 + }); assert!(v == vector[2, 3, 4], 0) } @@ -682,7 +670,7 @@ module std::vector_tests { let v1 = vector[1, 2, 3]; let v2 = vector[10, 20, 30]; let s = 0; - V::zip_ref(&v1, &v2, |e1, e2| s = s + *e1 * *e2); + v1.zip_ref(&v2, |e1, e2| s += *e1 * *e2); assert!(s == 140, 0); } @@ -693,18 +681,18 @@ module std::vector_tests { let v1 = vector[1]; let v2 = vector[10, 20]; let s = 0; - V::zip_ref(&v1, &v2, |e1, e2| s = s + *e1 * *e2); + v1.zip_ref(&v2, |e1, e2| s += *e1 * *e2); } #[test] fun test_zip_mut() { let v1 = vector[1, 2, 3]; let v2 = vector[10, 20, 30]; - V::zip_mut(&mut v1, &mut v2, |e1, e2| { + v1.zip_mut(&mut v2, |e1, e2| { let e1: &mut u64 = e1; let e2: &mut u64 = e2; - *e1 = *e1 + 1; - *e2 = *e2 + 10; + *e1 += 1; + *e2 += 10; }); assert!(v1 == vector[2, 3, 4], 0); assert!(v2 == vector[20, 30, 40], 0); @@ -714,7 +702,7 @@ module std::vector_tests { fun test_zip_map() { let v1 = vector[1, 2, 3]; let v2 = vector[10, 20, 30]; - let result = V::zip_map(v1, v2, |e1, e2| e1 + e2); + let result = v1.zip_map(v2, |e1, e2| e1 + e2); assert!(result == vector[11, 22, 33], 0); } @@ -722,7 +710,7 @@ module std::vector_tests { fun test_zip_map_ref() { let v1 = vector[1, 2, 3]; let v2 = vector[10, 20, 30]; - let result = V::zip_map_ref(&v1, &v2, |e1, e2| *e1 + *e2); + let result = v1.zip_map_ref(&v2, |e1, e2| *e1 + *e2); assert!(result == vector[11, 22, 33], 0); } @@ -733,7 +721,7 @@ module std::vector_tests { let v1 = vector[1]; let v2 = vector[10, 20]; let s = 0; - V::zip_mut(&mut v1, &mut v2, |e1, e2| s = s + *e1 * *e2); + v1.zip_mut(&mut v2, |e1, e2| s = s + *e1 * *e2); } #[test] @@ -742,7 +730,7 @@ module std::vector_tests { fun test_zip_map_mismatching_lengths_should_fail() { let v1 = vector[1]; let v2 = vector[10, 20]; - V::zip_map(v1, v2, |e1, e2| e1 * e2); + v1.zip_map(v2, |e1, e2| e1 * e2); } #[test] @@ -751,7 +739,7 @@ module std::vector_tests { fun test_zip_map_ref_mismatching_lengths_should_fail() { let v1 = vector[1]; let v2 = vector[10, 20]; - V::zip_map_ref(&v1, &v2, |e1, e2| *e1 * *e2); + v1.zip_map_ref(&v2, |e1, e2| *e1 * *e2); } #[test] @@ -759,10 +747,10 @@ module std::vector_tests { let v = vector[1, 2, 3]; let i_s = 0; let s = 2; - V::enumerate_mut(&mut v, |i, e| { - i_s = i_s + i; + v.enumerate_mut(|i, e| { + i_s += i; *e = s; - s = s + 1 + s += 1 }); assert!(i_s == 3, 0); assert!(v == vector[2, 3, 4], 0); @@ -771,8 +759,8 @@ module std::vector_tests { #[test] fun test_fold() { let v = vector[1, 2, 3]; - let s = V::fold(v, 0, |r, e| r + e); - assert!(s == 6 , 0) + let s = v.fold(0, |r, e| r + e); + assert!(s == 6, 0) } #[test] @@ -781,13 +769,13 @@ module std::vector_tests { { let v = vector[3, 2, 1]; // ((100 - 3) - 2) - 1 = 94 - let s = V::fold(v, 100, |l, r| l - r); + let s = v.fold(100, |l, r| l - r); assert!(s == 94, 0) }; { let v = vector[3, 2, 1]; // 3 - (2 - (1 - 0)) = 2 - let s = V::foldr(v, 0, |l, r| l - r); + let s = v.foldr(0, |l, r| l - r); assert!(s == 2, 1) } } @@ -795,97 +783,141 @@ module std::vector_tests { #[test] fun test_map() { let v = vector[1, 2, 3]; - let s = V::map(v, |x| x + 1); - assert!(s == vector[2, 3, 4] , 0) + let s = v.map(|x| x + 1); + assert!(s == vector[2, 3, 4], 0) } #[test] fun test_map_ref() { let v = vector[1, 2, 3]; - let s = V::map_ref(&v, |x| *x + 1); - assert!(s == vector[2, 3, 4] , 0) + let s = v.map_ref(|x| *x + 1); + assert!(s == vector[2, 3, 4], 0) } #[test] fun test_filter() { let v = vector[1, 2, 3]; - let s = V::filter(v, |x| *x % 2 == 0); - assert!(s == vector[2] , 0) + let s = v.filter(|x| *x % 2 == 0); + assert!(s == 
vector[2], 0) } #[test] fun test_any() { let v = vector[1, 2, 3]; - let r = V::any(&v, |x| *x > 2); + let r = v.any(|x| *x > 2); assert!(r, 0) } #[test] fun test_all() { let v = vector[1, 2, 3]; - let r = V::all(&v, |x| *x >= 1); + let r = v.all(|x| *x >= 1); assert!(r, 0) } #[test] fun test_rotate() { let v = vector[1, 2, 3, 4, 5]; - assert!(vector::rotate(&mut v, 2) == 3, 0); + assert!(v.rotate(2) == 3, 0); assert!(&v == &vector[3, 4, 5, 1, 2], 1); - assert!(vector::rotate_slice(&mut v, 1, 2, 5) == 4, 2); + assert!(v.rotate_slice(1, 2, 5) == 4, 2); assert!(&v == &vector[3, 5, 1, 2, 4], 3); - assert!(vector::rotate_slice(&mut v, 0, 0, 5) == 5, 2); + assert!(v.rotate_slice(0, 0, 5) == 5, 2); assert!(&v == &vector[3, 5, 1, 2, 4], 3); - assert!(vector::rotate_slice(&mut v, 0, 5, 5) == 0, 2); + assert!(v.rotate_slice(0, 5, 5) == 0, 2); assert!(&v == &vector[3, 5, 1, 2, 4], 3); } #[test] fun test_partition() { let v = vector[1, 2, 3, 4, 5]; - assert!(vector::partition(&mut v, |n| *n % 2 == 0) == 2, 0); + assert!(v.partition(|n| *n % 2 == 0) == 2, 0); assert!(&v == &vector[2, 4, 3, 1, 5], 1); - assert!(vector::partition(&mut v, |_n| false) == 0, 0); + assert!(v.partition(|_n| false) == 0, 0); assert!(&v == &vector[2, 4, 3, 1, 5], 1); - assert!(vector::partition(&mut v, |_n| true) == 5, 0); + assert!(v.partition(|_n| true) == 5, 0); assert!(&v == &vector[2, 4, 3, 1, 5], 1); } #[test] fun test_stable_partition() { - let v:vector = vector[1, 2, 3, 4, 5]; + let v: vector = vector[1, 2, 3, 4, 5]; - assert!(vector::stable_partition(&mut v, |n| *n % 2 == 0) == 2, 0); + assert!(v.stable_partition(|n| *n % 2 == 0) == 2, 0); assert!(&v == &vector[2, 4, 1, 3, 5], 1); - assert!(vector::partition(&mut v, |_n| false) == 0, 0); + assert!(v.partition(|_n| false) == 0, 0); assert!(&v == &vector[2, 4, 1, 3, 5], 1); - assert!(vector::partition(&mut v, |_n| true) == 5, 0); + assert!(v.partition(|_n| true) == 5, 0); assert!(&v == &vector[2, 4, 1, 3, 5], 1); } #[test] fun test_insert() { - let v:vector = vector[1, 2, 3, 4, 5]; + let v: vector = vector[1, 2, 3, 4, 5]; - vector::insert(&mut v,2, 6); + v.insert(2, 6); assert!(&v == &vector[1, 2, 6, 3, 4, 5], 1); - vector::insert(&mut v,6, 7); + v.insert(6, 7); assert!(&v == &vector[1, 2, 6, 3, 4, 5, 7], 1); + + let v: vector = vector[]; + v.insert(0, 1); + assert!(&v == &vector[1], 1); + + let v: vector = vector[1]; + v.insert(0, 2); + assert!(&v == &vector[2, 1], 1); + + let v: vector = vector[1]; + v.insert(1, 2); + assert!(&v == &vector[1, 2], 1); + + let v: vector = vector[1, 2]; + v.insert(0, 3); + assert!(&v == &vector[3, 1, 2], 1); + + let v: vector = vector[1, 2]; + v.insert(1, 3); + assert!(&v == &vector[1, 3, 2], 1); + + let v: vector = vector[1, 2]; + v.insert(2, 3); + assert!(&v == &vector[1, 2, 3], 1); + + let v: vector = vector[1, 2, 3]; + v.insert(0, 4); + assert!(&v == &vector[4, 1, 2, 3], 1); + + let v: vector = vector[1, 2, 3, 4]; + v.insert(0, 5); + assert!(&v == &vector[5, 1, 2, 3, 4], 1); + + let v: vector = vector[1, 2, 3, 4]; + v.insert(1, 5); + assert!(&v == &vector[1, 5, 2, 3, 4], 1); + + let v: vector = vector[1, 2, 3, 4]; + v.insert(2, 5); + assert!(&v == &vector[1, 2, 5, 3, 4], 1); + + let v: vector = vector[1, 2, 3, 4]; + v.insert(4, 5); + assert!(&v == &vector[1, 2, 3, 4, 5], 1); } #[test] #[expected_failure(abort_code = V::EINDEX_OUT_OF_BOUNDS)] fun test_insert_out_of_bounds() { - let v:vector = vector[1, 2, 3, 4, 5]; + let v: vector = vector[1, 2, 3, 4, 5]; - vector::insert(&mut v,6, 6); + v.insert(6, 6); } #[test] @@ -917,18 +949,18 @@ 
module std::vector_tests { fun test_slice() { let v = &vector[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; - let slice_beginning = vector::slice(v, 0, 3); + let slice_beginning = v.slice(0, 3); assert!(slice_beginning == vector[0, 1, 2], 1); - let slice_end = vector::slice(v, 7, 10); + let slice_end = v.slice(7, 10); assert!(slice_end == vector[7, 8, 9], 1); - let empty_slice = vector::slice(v, 5, 5); + let empty_slice = v.slice(5, 5); assert!(empty_slice == vector[], 1); - let empty_slice = vector::slice(v, 0, 0); + let empty_slice = v.slice(0, 0); assert!(empty_slice == vector[], 1); - let full_slice = &vector::slice(v, 0, 10); + let full_slice = &v.slice(0, 10); assert!(full_slice == v, 1); } @@ -936,14 +968,14 @@ module std::vector_tests { #[expected_failure(abort_code = V::EINVALID_SLICE_RANGE)] fun test_slice_invalid_range() { let v = &vector[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; - vector::slice(v, 7, 6); // start is greater than end + v.slice(7, 6); // start is greater than end } #[test] #[expected_failure(abort_code = V::EINVALID_SLICE_RANGE)] fun test_slice_out_of_bounds() { let v = &vector[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; - vector::slice(v, 0, 11); // end is out of bounds + v.slice(0, 11); // end is out of bounds } #[test_only] @@ -952,6 +984,31 @@ module std::vector_tests { #[test] fun test_destroy() { let v = vector[MoveOnly {}]; - vector::destroy(v, |m| { let MoveOnly {} = m; }) + v.destroy(|m| { let MoveOnly {} = m; }) + } + + #[test] + fun test_move_range_ints() { + let v = vector[3, 4, 5, 6]; + let w = vector[1, 2]; + + V::move_range(&mut v, 1, 2, &mut w, 1); + assert!(&v == &vector[3, 6], 0); + assert!(&w == &vector[1, 4, 5, 2], 0); + } + + #[test] + #[expected_failure(abort_code = V::EINDEX_OUT_OF_BOUNDS)] + fun test_replace_empty_abort() { + let v = vector[]; + let MoveOnly {} = v.replace(0, MoveOnly {}); + v.destroy_empty(); + } + + #[test] + fun test_replace() { + let v = vector[1, 2, 3, 4]; + v.replace(1, 17); + assert!(v == vector[1, 17, 3, 4], 0); } } diff --git a/aptos-move/framework/src/aptos-natives.bpl b/aptos-move/framework/src/aptos-natives.bpl index 222b2b855594c..e2c34cf5f21a0 100644 --- a/aptos-move/framework/src/aptos-natives.bpl +++ b/aptos-move/framework/src/aptos-natives.bpl @@ -16,6 +16,226 @@ procedure {:inline 1} $1_object_exists_at{{S}}(object: int) returns (res: bool) {%- endfor %} + + + +{%- for instance in aggregator_v2_instances %} +{%- set S = instance.suffix -%} +{%- set T = instance.name -%} + +// ================================================================================== +// Intrinsic implementation of aggregator_v2 for element type `{{instance.suffix}}` + + +datatype $1_aggregator_v2_Aggregator'{{S}}' { + $1_aggregator_v2_Aggregator'{{S}}'($value: {{T}}, $max_value: {{T}}) +} +function {:inline} $Update'$1_aggregator_v2_Aggregator'{{S}}''_value(s: $1_aggregator_v2_Aggregator'{{S}}', x: {{T}}): $1_aggregator_v2_Aggregator'{{S}}' { + $1_aggregator_v2_Aggregator'{{S}}'(x, s->$max_value) +} +function {:inline} $Update'$1_aggregator_v2_Aggregator'{{S}}''_max_value(s: $1_aggregator_v2_Aggregator'{{S}}', x: {{T}}): $1_aggregator_v2_Aggregator'{{S}}' { + $1_aggregator_v2_Aggregator'{{S}}'(s->$value, x) +} +function $IsValid'$1_aggregator_v2_Aggregator'{{S}}''(s: $1_aggregator_v2_Aggregator'{{S}}'): bool { + $IsValid'{{S}}'(s->$value) + && $IsValid'{{S}}'(s->$max_value) +} +function {:inline} $IsEqual'$1_aggregator_v2_Aggregator'{{S}}''(s1: $1_aggregator_v2_Aggregator'{{S}}', s2: $1_aggregator_v2_Aggregator'{{S}}'): bool { + $IsEqual'{{S}}'(s1->$value, s2->$value) 
+ && $IsEqual'{{S}}'(s1->$max_value, s2->$max_value) +} + +procedure {:inline 1} $1_aggregator_v2_create_unbounded_aggregator'{{S}}'() returns (res: $1_aggregator_v2_Aggregator'{{S}}') +{ + {% if S == "u64" -%} + res := $1_aggregator_v2_Aggregator'{{S}}'(0, $MAX_U64); + {% elif S == "u128" -%} + res := $1_aggregator_v2_Aggregator'{{S}}'(0, $MAX_U128); + {% elif "#" in S -%} + if (!$IsEqual'vec'u8''($TypeName({{S}}_info), MakeVec3(117, 54, 52)) && !$IsEqual'vec'u8''($TypeName({{S}}_info), MakeVec4(117, 49, 50, 56))) { + call $ExecFailureAbort(); + return; + } + {% else -%} + call $ExecFailureAbort(); + return; + {% endif -%} +} + + + procedure {:inline 1} $1_aggregator_v2_create_aggregator'{{S}}'($max_value: {{T}}) returns (res: $1_aggregator_v2_Aggregator'{{S}}') + { + {% if S == "u64" or S == "u128" -%} + res := $1_aggregator_v2_Aggregator'{{S}}'(0, $max_value); + {% elif "#" in S -%} + if (!$IsEqual'vec'u8''($TypeName({{S}}_info), MakeVec3(117, 54, 52)) && !$IsEqual'vec'u8''($TypeName({{S}}_info), MakeVec4(117, 49, 50, 56))) { + call $ExecFailureAbort(); + return; + } + {% else -%} + call $ExecFailureAbort(); + return; + {% endif -%} + } + + + procedure {:inline 1} $1_aggregator_v2_try_add'{{S}}'(aggregator: $Mutation ($1_aggregator_v2_Aggregator'{{S}}'), value: {{T}}) returns (res: bool, aggregator_updated: $Mutation ($1_aggregator_v2_Aggregator'{{S}}')) + { + {% if S == "u64" or S == "u128" -%} + if ($Dereference(aggregator)->$max_value < value + $Dereference(aggregator)->$value) { + res := false; + aggregator_updated:= aggregator; + } else { + res := true; + aggregator_updated:= $UpdateMutation(aggregator, $1_aggregator_v2_Aggregator'{{S}}'(value + $Dereference(aggregator)->$value, $Dereference(aggregator)->$max_value)); + } + {% elif "#" in S -%} + if (!$IsEqual'vec'u8''($TypeName({{S}}_info), MakeVec3(117, 54, 52)) && !$IsEqual'vec'u8''($TypeName({{S}}_info), MakeVec4(117, 49, 50, 56))) { + call $ExecFailureAbort(); + return; + } + {% else -%} + call $ExecFailureAbort(); + return; + {% endif -%} + } + + procedure {:inline 1} $1_aggregator_v2_try_sub'{{S}}'(aggregator: $Mutation ($1_aggregator_v2_Aggregator'{{S}}'), value: {{T}}) returns (res: bool, aggregator_updated: $Mutation ($1_aggregator_v2_Aggregator'{{S}}')) + { + {% if S == "u64" or S == "u128" -%} + if ($Dereference(aggregator)->$value < value) { + res := false; + aggregator_updated:= aggregator; + return; + } else { + res := true; + aggregator_updated:= $UpdateMutation(aggregator, $1_aggregator_v2_Aggregator'{{S}}'($Dereference(aggregator)->$value - value, $Dereference(aggregator)->$max_value)); + return; + } + {% elif "#" in S -%} + if (!$IsEqual'vec'u8''($TypeName({{S}}_info), MakeVec3(117, 54, 52)) && !$IsEqual'vec'u8''($TypeName({{S}}_info), MakeVec4(117, 49, 50, 56))) { + call $ExecFailureAbort(); + return; + } + {% else -%} + call $ExecFailureAbort(); + return; + {% endif -%} + } + + procedure {:inline 1} $1_aggregator_v2_add'{{S}}'(aggregator: $Mutation ($1_aggregator_v2_Aggregator'{{S}}'), value: {{T}}) returns (aggregator_updated: $Mutation ($1_aggregator_v2_Aggregator'{{S}}')) + { + {% if S == "u64" or S == "u128" -%} + var try_result: bool; + var try_aggregator: $Mutation $1_aggregator_v2_Aggregator'{{S}}'; + call try_result, try_aggregator := $1_aggregator_v2_try_add'{{S}}'(aggregator, value); + if (!try_result) { + call $ExecFailureAbort(); + return; + } + aggregator_updated := try_aggregator; + return; + {% elif "#" in S -%} + var try_result: bool; + var try_aggregator: $Mutation 
$1_aggregator_v2_Aggregator'{{S}}'; + call try_result, try_aggregator := $1_aggregator_v2_try_add'{{S}}'(aggregator, value); + return; + {% else -%} + call $ExecFailureAbort(); + return; + {% endif -%} + } + + procedure {:inline 1} $1_aggregator_v2_sub'{{S}}'(aggregator: $Mutation ($1_aggregator_v2_Aggregator'{{S}}'), value: {{T}}) returns (aggregator_updated: $Mutation ($1_aggregator_v2_Aggregator'{{S}}')) + { + {% if S == "u64" or S == "u128" -%} + var try_result: bool; + var try_aggregator: $Mutation $1_aggregator_v2_Aggregator'{{S}}'; + call try_result, try_aggregator := $1_aggregator_v2_try_sub'{{S}}'(aggregator, value); + if (!try_result) { + call $ExecFailureAbort(); + return; + } + aggregator_updated := try_aggregator; + return; + {% elif "#" in S -%} + var try_result: bool; + var try_aggregator: $Mutation $1_aggregator_v2_Aggregator'{{S}}'; + call try_result, try_aggregator := $1_aggregator_v2_try_add'{{S}}'(aggregator, value); + return; + {% else -%} + call $ExecFailureAbort(); + return; + {% endif -%} + } + + procedure {:inline 1} $1_aggregator_v2_read'{{S}}'(aggregator: $1_aggregator_v2_Aggregator'{{S}}') returns (res: {{T}}) { + {% if S == "u64" or S == "u128" -%} + res := aggregator->$value; + {% elif "#" in S -%} + if (!$IsEqual'vec'u8''($TypeName({{S}}_info), MakeVec3(117, 54, 52)) && !$IsEqual'vec'u8''($TypeName({{S}}_info), MakeVec4(117, 49, 50, 56))) { + call $ExecFailureAbort(); + return; + } + {% else -%} + call $ExecFailureAbort(); + return; + {% endif -%} + } + + procedure {:inline 1} $1_aggregator_v2_max_value'{{S}}'(aggregator: $1_aggregator_v2_Aggregator'{{S}}') returns (res: {{T}}) { + {% if S == "u64" or S == "u128" -%} + res := aggregator->$max_value; + {% elif "#" in S -%} + if (!$IsEqual'vec'u8''($TypeName({{S}}_info), MakeVec3(117, 54, 52)) && !$IsEqual'vec'u8''($TypeName({{S}}_info), MakeVec4(117, 49, 50, 56))) { + call $ExecFailureAbort(); + return; + } + {% else -%} + call $ExecFailureAbort(); + return; + {% endif -%} + } + + procedure {:inline 1} $1_aggregator_v2_is_at_least_impl'{{S}}'(aggregator: $1_aggregator_v2_Aggregator'{{S}}', min_amount: {{T}}) returns (res: bool) + { + {% if S == "u64" or S == "u128" -%} + res := aggregator->$value >= min_amount; + return; + {% elif "#" in S -%} + if (!$IsEqual'vec'u8''($TypeName({{S}}_info), MakeVec3(117, 54, 52)) && !$IsEqual'vec'u8''($TypeName({{S}}_info), MakeVec4(117, 49, 50, 56))) { + call $ExecFailureAbort(); + return; + } + {% else -%} + call $ExecFailureAbort(); + return; + {% endif -%} + } + +function {:inline} $1_aggregator_v2_spec_get_value'{{S}}'(s: $1_aggregator_v2_Aggregator'{{S}}'): {{T}} { + s->$value +} + +function {:inline} $1_aggregator_v2_spec_get_max_value'{{S}}'(s: $1_aggregator_v2_Aggregator'{{S}}'): {{T}} { + s->$max_value +} + +function {:inline} $1_aggregator_v2_$read'{{S}}'(s: $1_aggregator_v2_Aggregator'{{S}}'): {{T}} { + s->$value +} + +{% if S == "u64" or S == "u128" -%} + function {:inline} $1_aggregator_v2_$is_at_least_impl'{{S}}'(aggregator: $1_aggregator_v2_Aggregator'{{S}}', min_amount: int): bool + { + aggregator->$value >= min_amount + } +{% else -%} + function $1_aggregator_v2_$is_at_least_impl'{{S}}'(aggregator: $1_aggregator_v2_Aggregator'{{S}}', min_amount: {{T}}): bool; +{% endif -%} + +{%- endfor %} + // ================================================================================== // Intrinsic implementation of aggregator and aggregator factory @@ -46,6 +266,13 @@ function {:inline} $IsEqual'$1_aggregator_Aggregator'(s1: $1_aggregator_Aggregat function {:inline} 
$1_aggregator_spec_get_limit(s: $1_aggregator_Aggregator): int { s->$limit } +function {:inline} $1_aggregator_limit(s: $1_aggregator_Aggregator): int { + s->$limit +} +procedure {:inline 1} $1_aggregator_limit(s: $1_aggregator_Aggregator) returns (res: int) { + res := s->$limit; + return; +} function {:inline} $1_aggregator_spec_get_handle(s: $1_aggregator_Aggregator): int { s->$handle } @@ -77,3 +304,8 @@ axiom (forall limit: int :: {$1_aggregator_factory_spec_new_aggregator(limit)} axiom (forall limit: int :: {$1_aggregator_factory_spec_new_aggregator(limit)} (var agg := $1_aggregator_factory_spec_new_aggregator(limit); $1_aggregator_spec_aggregator_get_val(agg) == 0)); + +// ================================================================================== +// Native for function_info + +procedure $1_function_info_is_identifier(s: Vec int) returns (res: bool); diff --git a/aptos-move/framework/src/aptos.rs b/aptos-move/framework/src/aptos.rs index fd4c6457cd672..ef0212751a263 100644 --- a/aptos-move/framework/src/aptos.rs +++ b/aptos-move/framework/src/aptos.rs @@ -4,9 +4,8 @@ #![forbid(unsafe_code)] use crate::{ - docgen::DocgenOptions, extended_checks, path_in_crate, - release_builder::RELEASE_BUNDLE_EXTENSION, release_bundle::ReleaseBundle, BuildOptions, - ReleaseOptions, + docgen::DocgenOptions, path_in_crate, release_builder::RELEASE_BUNDLE_EXTENSION, + release_bundle::ReleaseBundle, BuildOptions, ReleaseOptions, }; use clap::ValueEnum; use move_command_line_common::address::NumericalAddress; @@ -56,22 +55,27 @@ impl ReleaseTarget { /// Returns the package directories (relative to `framework`), in the order /// they need to be published, as well as an optional path to the file where /// rust bindings generated from the package should be stored. - pub fn packages(self) -> Vec<(&'static str, Option<&'static str>)> { + /// Last element is a boolean on whether to use set_latest_language while building it. + pub fn packages(self) -> Vec<(&'static str, Option<&'static str>, bool)> { let result = vec![ - ("move-stdlib", None), - ("aptos-stdlib", None), + ("move-stdlib", None, false), + ("aptos-stdlib", None, false), ( "aptos-framework", Some("cached-packages/src/aptos_framework_sdk_builder.rs"), + false, ), ( "aptos-token", Some("cached-packages/src/aptos_token_sdk_builder.rs"), + false, ), ( "aptos-token-objects", Some("cached-packages/src/aptos_token_objects_sdk_builder.rs"), + false, ), + ("aptos-experimental", None, true), ]; // Currently we don't have experimental packages only included in particular targets. 
result @@ -94,20 +98,22 @@ impl ReleaseTarget { let packages = self .packages() .into_iter() - .map(|(path, binding_path)| { - (crate_dir.join(path), binding_path.unwrap_or("").to_owned()) + .map(|(path, binding_path, latest_language)| { + ( + crate_dir.join(path), + binding_path.unwrap_or("").to_owned(), + latest_language, + ) }) .collect::>(); + let package_use_latest_language = packages + .iter() + .map(|(_, _, latest_language)| *latest_language) + .collect(); ReleaseOptions { build_options: BuildOptions { - dev: false, with_srcs, with_abis: true, - with_source_maps: false, - with_error_map: true, - named_addresses: Default::default(), - override_std: None, - install_dir: None, with_docs: true, docgen_options: Some(DocgenOptions { include_impl: true, @@ -120,17 +126,15 @@ impl ReleaseTarget { output_format: None, }), skip_fetch_latest_git_deps: true, - bytecode_version: None, - compiler_version: None, - language_version: None, - skip_attribute_checks: false, - check_test_code: false, - known_attributes: extended_checks::get_all_attribute_names().clone(), + ..BuildOptions::default() }, - packages: packages.iter().map(|(path, _)| path.to_owned()).collect(), + packages: packages + .iter() + .map(|(path, _, _)| path.to_owned()) + .collect(), rust_bindings: packages .into_iter() - .map(|(_, binding)| { + .map(|(_, binding, _)| { if !binding.is_empty() { crate_dir.join(binding).display().to_string() } else { @@ -138,6 +142,7 @@ impl ReleaseTarget { } }) .collect(), + package_use_latest_language, output: if let Some(path) = out { path } else { @@ -183,6 +188,7 @@ static NAMED_ADDRESSES: Lazy> = Lazy::new(|| let one = NumericalAddress::parse_str("0x1").unwrap(); let three = NumericalAddress::parse_str("0x3").unwrap(); let four = NumericalAddress::parse_str("0x4").unwrap(); + let seven = NumericalAddress::parse_str("0x7").unwrap(); let ten = NumericalAddress::parse_str("0xA").unwrap(); let resources = NumericalAddress::parse_str("0xA550C18").unwrap(); result.insert("std".to_owned(), one); @@ -190,6 +196,7 @@ static NAMED_ADDRESSES: Lazy> = Lazy::new(|| result.insert("aptos_framework".to_owned(), one); result.insert("aptos_token".to_owned(), three); result.insert("aptos_token_objects".to_owned(), four); + result.insert("aptos_experimental".to_owned(), seven); result.insert("aptos_fungible_asset".to_owned(), ten); result.insert("core_resources".to_owned(), resources); result.insert("vm".to_owned(), zero); diff --git a/aptos-move/framework/src/built_package.rs b/aptos-move/framework/src/built_package.rs index 44ef45eca2a74..d360cc989f241 100644 --- a/aptos-move/framework/src/built_package.rs +++ b/aptos-move/framework/src/built_package.rs @@ -5,20 +5,30 @@ use crate::{ docgen::DocgenOptions, extended_checks, natives::code::{ModuleMetadata, MoveOption, PackageDep, PackageMetadata, UpgradePolicy}, - zip_metadata, zip_metadata_str, RuntimeModuleMetadataV1, APTOS_METADATA_KEY, - APTOS_METADATA_KEY_V1, METADATA_V1_MIN_FILE_FORMAT_VERSION, + zip_metadata, zip_metadata_str, }; use anyhow::bail; -use aptos_types::{account_address::AccountAddress, transaction::EntryABI}; +use aptos_types::{ + account_address::AccountAddress, + transaction::EntryABI, + vm::module_metadata::{ + RuntimeModuleMetadataV1, APTOS_METADATA_KEY, APTOS_METADATA_KEY_V1, + METADATA_V1_MIN_FILE_FORMAT_VERSION, + }, +}; use clap::Parser; use codespan_reporting::{ diagnostic::Severity, term::termcolor::{Color, ColorChoice, ColorSpec, StandardStream, WriteColor}, }; use itertools::Itertools; -use move_binary_format::CompiledModule; +use 
legacy_move_compiler::{ + compiled_unit::{CompiledUnit, NamedCompiledModule}, + shared::NumericalAddress, +}; +use move_binary_format::{file_format_common, file_format_common::VERSION_7, CompiledModule}; use move_command_line_common::files::MOVE_COMPILED_EXTENSION; -use move_compiler::compiled_unit::{CompiledUnit, NamedCompiledModule}; +use move_compiler_v2::{external_checks::ExternalChecks, options::Options, Experiment}; use move_core_types::{language_storage::ModuleId, metadata::Metadata}; use move_model::{ metadata::{CompilerVersion, LanguageVersion}, @@ -26,6 +36,7 @@ use move_model::{ }; use move_package::{ compilation::{compiled_package::CompiledPackage, package_layout::CompiledPackageLayout}, + resolution::resolution_graph::ResolvedGraph, source_package::{ manifest_parser::{parse_move_manifest_string, parse_source_manifest}, std_lib::StdVersion, @@ -37,17 +48,19 @@ use std::{ collections::{BTreeMap, BTreeSet}, io::{stderr, Write}, path::{Path, PathBuf}, + sync::Arc, }; pub const METADATA_FILE_NAME: &str = "package-metadata.bcs"; pub const UPGRADE_POLICY_CUSTOM_FIELD: &str = "upgrade_policy"; -pub const APTOS_PACKAGES: [&str; 5] = [ +pub const APTOS_PACKAGES: [&str; 6] = [ "AptosFramework", "MoveStdlib", "AptosStdlib", "AptosToken", "AptosTokenObjects", + "AptosExperimental", ]; /// Represents a set of options for building artifacts from Move. @@ -95,6 +108,8 @@ pub struct BuildOptions { pub check_test_code: bool, #[clap(skip)] pub known_attributes: BTreeSet, + #[clap(skip)] + pub experiments: Vec, } // Because named_addresses has no parser, we can't use clap's default impl. This must be aligned @@ -119,8 +134,39 @@ impl Default for BuildOptions { compiler_version: None, language_version: None, skip_attribute_checks: false, - check_test_code: false, + check_test_code: true, known_attributes: extended_checks::get_all_attribute_names().clone(), + experiments: vec![], + } + } +} + +impl BuildOptions { + pub fn move_2() -> Self { + BuildOptions { + bytecode_version: Some(VERSION_7), + language_version: Some(LanguageVersion::latest_stable()), + compiler_version: Some(CompilerVersion::latest_stable()), + ..Self::default() + } + } + + pub fn inferred_bytecode_version(&self) -> u32 { + self.language_version + .unwrap_or_default() + .infer_bytecode_version(self.bytecode_version) + } + + pub fn with_experiment(mut self, exp: &str) -> Self { + self.experiments.push(exp.to_string()); + self + } + + pub fn set_latest_language(self) -> Self { + BuildOptions { + language_version: Some(LanguageVersion::latest()), + bytecode_version: Some(file_format_common::VERSION_MAX), + ..self } } } @@ -143,11 +189,16 @@ pub fn build_model( language_version: Option, skip_attribute_checks: bool, known_attributes: BTreeSet, + experiments: Vec, ) -> anyhow::Result { + let bytecode_version = Some( + language_version + .unwrap_or_default() + .infer_bytecode_version(bytecode_version), + ); let build_config = BuildConfig { dev_mode, additional_named_addresses, - architecture: None, generate_abis: false, generate_docs: false, generate_move_model: false, @@ -164,6 +215,7 @@ pub fn build_model( language_version, skip_attribute_checks, known_attributes, + experiments, }, }; let compiler_version = compiler_version.unwrap_or_default(); @@ -183,15 +235,20 @@ impl BuiltPackage { /// This function currently reports all Move compilation errors and warnings to stdout, /// and is not `Ok` if there was an error among those. 
pub fn build(package_path: PathBuf, options: BuildOptions) -> anyhow::Result { - let bytecode_version = options.bytecode_version; + let build_config = Self::create_build_config(&options)?; + let resolved_graph = Self::prepare_resolution_graph(package_path, build_config.clone())?; + BuiltPackage::build_with_external_checks(resolved_graph, options, build_config, vec![]) + } + + pub fn create_build_config(options: &BuildOptions) -> anyhow::Result { + let bytecode_version = Some(options.inferred_bytecode_version()); let compiler_version = options.compiler_version; let language_version = options.language_version; Self::check_versions(&compiler_version, &language_version)?; let skip_attribute_checks = options.skip_attribute_checks; - let build_config = BuildConfig { + Ok(BuildConfig { dev_mode: options.dev, additional_named_addresses: options.named_addresses.clone(), - architecture: None, generate_abis: options.with_abis, generate_docs: false, generate_move_model: true, @@ -208,69 +265,112 @@ impl BuiltPackage { language_version, skip_attribute_checks, known_attributes: options.known_attributes.clone(), + experiments: options.experiments.clone(), }, - }; + }) + } + pub fn prepare_resolution_graph( + package_path: PathBuf, + build_config: BuildConfig, + ) -> anyhow::Result { eprintln!("Compiling, may take a little while to download git dependencies..."); - let (mut package, model_opt) = - build_config.compile_package_no_exit(&package_path, &mut stderr())?; - - // Run extended checks as well derive runtime metadata - let model = &model_opt.expect("move model"); - let runtime_metadata = extended_checks::run_extended_checks(model); - if model.diag_count(Severity::Warning) > 0 { - let mut error_writer = StandardStream::stderr(ColorChoice::Auto); - model.report_diag(&mut error_writer, Severity::Warning); - if model.has_errors() { - bail!("extended checks failed") + build_config.resolution_graph_for_package(&package_path, &mut stderr()) + } + + /// Same as `build` but allows to provide external checks to be made on Move code. + /// The `external_checks` are only run when compiler v2 is used. 
+ pub fn build_with_external_checks( + resolved_graph: ResolvedGraph, + options: BuildOptions, + build_config: BuildConfig, + external_checks: Vec>, + ) -> anyhow::Result { + { + let package_path = resolved_graph.root_package_path.clone(); + let bytecode_version = build_config.compiler_config.bytecode_version; + + let (mut package, model_opt) = build_config.compile_package_no_exit( + resolved_graph, + external_checks, + &mut stderr(), + )?; + + // Run extended checks as well derive runtime metadata + let model = &model_opt.expect("move model"); + + if let Some(model_options) = model.get_extension::() { + if model_options.experiment_on(Experiment::STOP_BEFORE_EXTENDED_CHECKS) { + std::process::exit(if model.has_warnings() { 1 } else { 0 }) + } } - } - let compiled_pkg_path = package - .compiled_package_info - .build_flags - .install_dir - .as_ref() - .unwrap_or(&package_path) - .join(CompiledPackageLayout::Root.path()) - .join(package.compiled_package_info.package_name.as_str()); - inject_runtime_metadata( - compiled_pkg_path, - &mut package, - runtime_metadata, - bytecode_version, - )?; + let runtime_metadata = extended_checks::run_extended_checks(model); + if model.diag_count(Severity::Warning) > 0 + && !model + .get_extension::() + .is_some_and(|model_options| { + model_options.experiment_on(Experiment::SKIP_BAILOUT_ON_EXTENDED_CHECKS) + }) + { + let mut error_writer = StandardStream::stderr(ColorChoice::Auto); + model.report_diag(&mut error_writer, Severity::Warning); + if model.has_errors() { + bail!("extended checks failed") + } + } - // If enabled generate docs. - if options.with_docs { - let docgen = if let Some(opts) = options.docgen_options.clone() { - opts - } else { - DocgenOptions::default() - }; - let dep_paths = package - .deps_compiled_units - .iter() - .map(|(_, u)| { - u.source_path - .parent() - .unwrap() - .parent() - .unwrap() - .join("doc") - .display() - .to_string() - }) - .unique() - .collect::>(); - docgen.run(package_path.display().to_string(), dep_paths, model)? - } + if let Some(model_options) = model.get_extension::() { + if model_options.experiment_on(Experiment::FAIL_ON_WARNING) && model.has_warnings() + { + bail!("found warning(s), and `--fail-on-warning` is set") + } else if model_options.experiment_on(Experiment::STOP_AFTER_EXTENDED_CHECKS) { + std::process::exit(if model.has_warnings() { 1 } else { 0 }) + } + } - Ok(Self { - options, - package_path, - package, - }) + let compiled_pkg_path = package + .compiled_package_info + .build_flags + .install_dir + .as_ref() + .unwrap_or(&package_path) + .join(CompiledPackageLayout::Root.path()) + .join(package.compiled_package_info.package_name.as_str()); + inject_runtime_metadata( + compiled_pkg_path, + &mut package, + runtime_metadata, + bytecode_version, + )?; + + // If enabled generate docs. + if options.with_docs { + let docgen = options.docgen_options.clone().unwrap_or_default(); + let dep_paths = package + .deps_compiled_units + .iter() + .map(|(_, u)| { + u.source_path + .parent() + .unwrap() + .parent() + .unwrap() + .join("doc") + .display() + .to_string() + }) + .unique() + .collect::>(); + docgen.run(package_path.display().to_string(), dep_paths, model)? + } + + Ok(Self { + options, + package_path, + package, + }) + } } // Check versions and warn user if using unstable ones. 
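The new `BuildOptions` helpers above (`move_2`, `with_experiment`, `inferred_bytecode_version`, `set_latest_language`) are intended to be chained before handing the options to `BuiltPackage::build`. A minimal sketch of that flow follows; it is not part of this change, the package path and experiment name are placeholders, and it assumes the crate's existing re-export of `built_package` items as `aptos_framework::{BuildOptions, BuiltPackage}`.

```rust
// Illustrative sketch only; the experiment name and package path are placeholders.
use aptos_framework::{BuildOptions, BuiltPackage};
use std::path::PathBuf;

fn build_with_move_2(package_path: PathBuf) -> anyhow::Result<BuiltPackage> {
    // `move_2()` selects the latest stable compiler and language versions plus
    // bytecode VERSION_7; `with_experiment` opts into one extra compiler experiment.
    let options = BuildOptions::move_2().with_experiment("my-experiment");

    // The effective bytecode version is derived from the language version when it
    // was not set explicitly.
    let _bytecode_version: u32 = options.inferred_bytecode_version();

    BuiltPackage::build(package_path, options)
}
```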
@@ -325,9 +425,8 @@ impl BuiltPackage { self.package .root_modules() .map(|unit_with_source| { - unit_with_source - .unit - .serialize(self.options.bytecode_version) + let bytecode_version = self.options.inferred_bytecode_version(); + unit_with_source.unit.serialize(Some(bytecode_version)) }) .collect() } @@ -374,7 +473,7 @@ impl BuiltPackage { .map(|unit_with_source| { unit_with_source .unit - .serialize(self.options.bytecode_version) + .serialize(Some(self.options.inferred_bytecode_version())) }) .collect() } @@ -420,17 +519,28 @@ impl BuiltPackage { .package .deps_compiled_units .iter() - .map(|(name, unit)| { - let package_name = name.as_str().to_string(); - let account = match &unit.unit { - CompiledUnit::Module(m) => AccountAddress::new(m.address.into_bytes()), - _ => panic!("script not a dependency"), - }; - PackageDep { - account, - package_name, - } + .flat_map(|(name, unit)| match &unit.unit { + CompiledUnit::Module(m) => { + let package_name = name.as_str().to_string(); + let account = AccountAddress::new(m.address.into_bytes()); + + Some(PackageDep { + account, + package_name, + }) + }, + CompiledUnit::Script(_) => None, }) + .chain( + self.package + .bytecode_deps + .iter() + .map(|(name, module)| PackageDep { + account: NumericalAddress::from_account_address(*module.self_addr()) + .into_inner(), + package_name: name.as_str().to_string(), + }), + ) .collect::>() .into_iter() .collect(); diff --git a/aptos-move/framework/src/chunked_publish.rs b/aptos-move/framework/src/chunked_publish.rs new file mode 100644 index 0000000000000..3eeaff36a88ef --- /dev/null +++ b/aptos-move/framework/src/chunked_publish.rs @@ -0,0 +1,223 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use aptos_types::{ + chain_id::ChainId, + transaction::{EntryFunction, TransactionPayload}, +}; +use move_core_types::{account_address::AccountAddress, ident_str, language_storage::ModuleId}; + +/// The default address where the `large_packages.move` module is deployed. +/// This address is used on both mainnet and testnet. +pub const LARGE_PACKAGES_PROD_MODULE_ADDRESS: &str = + "0x0e1ca3011bdd07246d4d16d909dbb2d6953a86c4735d5acf5865d962c630cce7"; + +/// Address where large packages module is deployed on dev network started from genesis +/// (including devnet and localnet) +pub const LARGE_PACKAGES_DEV_MODULE_ADDRESS: &str = "0x7"; + +/// The default chunk size for splitting code and metadata to fit within the transaction size limits. 
+pub const CHUNK_SIZE_IN_BYTES: usize = 55_000; + +pub enum PublishType { + AccountDeploy, + ObjectDeploy, + ObjectUpgrade, +} + +pub fn default_large_packages_module_address(chain_id: &ChainId) -> &'static str { + if chain_id.is_mainnet() || chain_id.is_testnet() { + LARGE_PACKAGES_PROD_MODULE_ADDRESS + } else { + LARGE_PACKAGES_DEV_MODULE_ADDRESS + } +} + +pub fn chunk_package_and_create_payloads( + metadata: Vec, + package_code: Vec>, + publish_type: PublishType, + object_address: Option, + large_packages_module_address: AccountAddress, + chunk_size: usize, +) -> Vec { + // Chunk the metadata + let mut metadata_chunks = create_chunks(metadata, chunk_size); + // Separate last chunk for special handling + let mut metadata_chunk = metadata_chunks.pop().expect("Metadata is required"); + + let mut taken_size = metadata_chunk.len(); + let mut payloads = metadata_chunks + .into_iter() + .map(|chunk| { + large_packages_stage_code_chunk(chunk, vec![], vec![], large_packages_module_address) + }) + .collect::>(); + + let mut code_indices: Vec = vec![]; + let mut code_chunks: Vec> = vec![]; + + for (idx, module_code) in package_code.into_iter().enumerate() { + let chunked_module = create_chunks(module_code, chunk_size); + for chunk in chunked_module { + if taken_size + chunk.len() > chunk_size { + // Create a payload and reset accumulators + let payload = large_packages_stage_code_chunk( + metadata_chunk, + code_indices.clone(), + code_chunks.clone(), + large_packages_module_address, + ); + payloads.push(payload); + + metadata_chunk = vec![]; + code_indices.clear(); + code_chunks.clear(); + taken_size = 0; + } + + code_indices.push(idx as u16); + taken_size += chunk.len(); + code_chunks.push(chunk); + } + } + + // The final call includes staging the last metadata and code chunk, and then publishing or upgrading the package on-chain. + let payload = match publish_type { + PublishType::AccountDeploy => large_packages_stage_code_chunk_and_publish_to_account( + metadata_chunk, + code_indices, + code_chunks, + large_packages_module_address, + ), + PublishType::ObjectDeploy => large_packages_stage_code_chunk_and_publish_to_object( + metadata_chunk, + code_indices, + code_chunks, + large_packages_module_address, + ), + PublishType::ObjectUpgrade => large_packages_stage_code_chunk_and_upgrade_object_code( + metadata_chunk, + code_indices, + code_chunks, + object_address.expect("ObjectAddress is missing"), + large_packages_module_address, + ), + }; + payloads.push(payload); + + payloads +} + +// Create chunks of data based on the defined maximum chunk size. +fn create_chunks(data: Vec, chunk_size: usize) -> Vec> { + data.chunks(chunk_size) + .map(|chunk| chunk.to_vec()) + .collect() +} + +// Create a transaction payload for staging chunked data to the staging area. +fn large_packages_stage_code_chunk( + metadata_chunk: Vec, + code_indices: Vec, + code_chunks: Vec>, + large_packages_module_address: AccountAddress, +) -> TransactionPayload { + TransactionPayload::EntryFunction(EntryFunction::new( + ModuleId::new( + large_packages_module_address, + ident_str!("large_packages").to_owned(), + ), + ident_str!("stage_code_chunk").to_owned(), + vec![], + vec![ + bcs::to_bytes(&metadata_chunk).unwrap(), + bcs::to_bytes(&code_indices).unwrap(), + bcs::to_bytes(&code_chunks).unwrap(), + ], + )) +} + +// Create a transaction payload for staging chunked data and finally publishing the package to an account. 
+fn large_packages_stage_code_chunk_and_publish_to_account( + metadata_chunk: Vec, + code_indices: Vec, + code_chunks: Vec>, + large_packages_module_address: AccountAddress, +) -> TransactionPayload { + // TODO[Orderless]: Change this to payload v2 format. + TransactionPayload::EntryFunction(EntryFunction::new( + ModuleId::new( + large_packages_module_address, + ident_str!("large_packages").to_owned(), + ), + ident_str!("stage_code_chunk_and_publish_to_account").to_owned(), + vec![], + vec![ + bcs::to_bytes(&metadata_chunk).unwrap(), + bcs::to_bytes(&code_indices).unwrap(), + bcs::to_bytes(&code_chunks).unwrap(), + ], + )) +} + +// Create a transaction payload for staging chunked data and finally publishing the package to an object. +fn large_packages_stage_code_chunk_and_publish_to_object( + metadata_chunk: Vec, + code_indices: Vec, + code_chunks: Vec>, + large_packages_module_address: AccountAddress, +) -> TransactionPayload { + TransactionPayload::EntryFunction(EntryFunction::new( + ModuleId::new( + large_packages_module_address, + ident_str!("large_packages").to_owned(), + ), + ident_str!("stage_code_chunk_and_publish_to_object").to_owned(), + vec![], + vec![ + bcs::to_bytes(&metadata_chunk).unwrap(), + bcs::to_bytes(&code_indices).unwrap(), + bcs::to_bytes(&code_chunks).unwrap(), + ], + )) +} + +// Create a transaction payload for staging chunked data and finally upgrading the object package. +fn large_packages_stage_code_chunk_and_upgrade_object_code( + metadata_chunk: Vec, + code_indices: Vec, + code_chunks: Vec>, + code_object: AccountAddress, + large_packages_module_address: AccountAddress, +) -> TransactionPayload { + TransactionPayload::EntryFunction(EntryFunction::new( + ModuleId::new( + large_packages_module_address, + ident_str!("large_packages").to_owned(), + ), + ident_str!("stage_code_chunk_and_upgrade_object_code").to_owned(), + vec![], + vec![ + bcs::to_bytes(&metadata_chunk).unwrap(), + bcs::to_bytes(&code_indices).unwrap(), + bcs::to_bytes(&code_chunks).unwrap(), + bcs::to_bytes(&code_object).unwrap(), + ], + )) +} + +// Cleanup account's `StagingArea` resource. 
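The staging helpers above are driven by `chunk_package_and_create_payloads`, which splits the package metadata and each module's bytecode into chunks no larger than `chunk_size` and emits one staging payload per chunk, with the final payload also performing the publish or upgrade. A minimal caller sketch follows; it is not part of this change, it assumes the elided generic parameters are `Vec<u8>` / `Vec<Vec<u8>>`, and it takes the chain id and pre-built package bytes as inputs.

```rust
// Illustrative sketch only: stage and publish a pre-built package to the sender's
// account via `large_packages.move`. `metadata` and `code` are assumed to be the
// BCS-encoded package metadata and the per-module bytecode from a prior build step.
use aptos_framework::chunked_publish::{
    chunk_package_and_create_payloads, default_large_packages_module_address,
    PublishType, CHUNK_SIZE_IN_BYTES,
};
use aptos_types::{chain_id::ChainId, transaction::TransactionPayload};
use move_core_types::account_address::AccountAddress;

fn chunked_account_deploy_payloads(
    metadata: Vec<u8>,
    code: Vec<Vec<u8>>,
    chain_id: &ChainId,
) -> Vec<TransactionPayload> {
    // Pick the deployed `large_packages` address for this network.
    let module_address =
        AccountAddress::from_hex_literal(default_large_packages_module_address(chain_id))
            .expect("valid address literal");

    // Every returned payload stages one chunk; the final payload also publishes.
    chunk_package_and_create_payloads(
        metadata,
        code,
        PublishType::AccountDeploy,
        None, // an object address is only needed for `PublishType::ObjectUpgrade`
        module_address,
        CHUNK_SIZE_IN_BYTES,
    )
}
```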
+pub fn large_packages_cleanup_staging_area( + large_packages_module_address: AccountAddress, +) -> TransactionPayload { + TransactionPayload::EntryFunction(EntryFunction::new( + ModuleId::new( + large_packages_module_address, + ident_str!("large_packages").to_owned(), + ), + ident_str!("cleanup_staging_area").to_owned(), + vec![], + vec![], + )) +} diff --git a/aptos-move/framework/src/docgen.rs b/aptos-move/framework/src/docgen.rs index f083489831e13..818de044f3033 100644 --- a/aptos-move/framework/src/docgen.rs +++ b/aptos-move/framework/src/docgen.rs @@ -100,6 +100,7 @@ impl DocgenOptions { include_call_diagrams: false, compile_relative_to_output_dir: false, output_format: self.output_format, + ensure_unix_paths: true, }; let output = move_docgen::Docgen::new(model, &options).gen(); if model.diag_count(Severity::Warning) > 0 { diff --git a/aptos-move/framework/src/extended_checks.rs b/aptos-move/framework/src/extended_checks.rs index 336c5bd0d9f21..231a7b4d73700 100644 --- a/aptos-move/framework/src/extended_checks.rs +++ b/aptos-move/framework/src/extended_checks.rs @@ -1,11 +1,14 @@ // Copyright © Aptos Foundation // SPDX-License-Identifier: Apache-2.0 -use crate::{KnownAttribute, RandomnessAnnotation, RuntimeModuleMetadataV1}; -use move_binary_format::file_format::{Ability, AbilitySet, Visibility}; +use aptos_types::vm::module_metadata::{ + KnownAttribute, RandomnessAnnotation, ResourceGroupScope, RuntimeModuleMetadataV1, +}; +use legacy_move_compiler::shared::known_attributes; +use move_binary_format::file_format::Visibility; use move_cli::base::test_validation; -use move_compiler::shared::known_attributes; use move_core_types::{ + ability::{Ability, AbilitySet}, account_address::AccountAddress, errmap::{ErrorDescription, ErrorMapping}, identifier::Identifier, @@ -30,9 +33,7 @@ use once_cell::sync::Lazy; use std::{ collections::{BTreeMap, BTreeSet}, rc::Rc, - str::FromStr, }; -use thiserror::Error; const ALLOW_UNSAFE_RANDOMNESS_ATTRIBUTE: &str = "lint::allow_unsafe_randomness"; const FMT_SKIP_ATTRIBUTE: &str = "fmt::skip"; @@ -40,6 +41,7 @@ const INIT_MODULE_FUN: &str = "init_module"; const LEGACY_ENTRY_FUN_ATTRIBUTE: &str = "legacy_entry_fun"; const ERROR_PREFIX: &str = "E"; const EVENT_STRUCT_ATTRIBUTE: &str = "event"; +const MUTATION_SKIP_ATTRIBUTE: &str = "mutation::skip"; const RANDOMNESS_ATTRIBUTE: &str = "randomness"; const RANDOMNESS_MAX_GAS_CLAIM: &str = "max_gas"; const RESOURCE_GROUP: &str = "resource_group"; @@ -52,10 +54,11 @@ const RANDOMNESS_MODULE_NAME: &str = "randomness"; // top-level attribute names, only. 
pub fn get_all_attribute_names() -> &'static BTreeSet { - const ALL_ATTRIBUTE_NAMES: [&str; 8] = [ + const ALL_ATTRIBUTE_NAMES: [&str; 9] = [ ALLOW_UNSAFE_RANDOMNESS_ATTRIBUTE, FMT_SKIP_ATTRIBUTE, LEGACY_ENTRY_FUN_ATTRIBUTE, + MUTATION_SKIP_ATTRIBUTE, RESOURCE_GROUP, RESOURCE_GROUP_MEMBER, VIEW_FUN_ATTRIBUTE, @@ -140,7 +143,7 @@ impl<'a> ExtendedChecker<'a> { // ---------------------------------------------------------------------------------- // Module Initialization -impl<'a> ExtendedChecker<'a> { +impl ExtendedChecker<'_> { fn check_init_module(&self, module: &ModuleEnv) { // TODO: also enable init_module by attribute, perhaps deprecate by name let init_module_sym = self.env.symbol_pool().make(INIT_MODULE_FUN); @@ -151,26 +154,44 @@ impl<'a> ExtendedChecker<'a> { &format!("`{}` function must be private", INIT_MODULE_FUN), ) } - for Parameter(_, ty, _) in fun.get_parameters() { + + let record_param_mismatch_error = || { + let msg = format!( + "`{}` function can only take a single parameter of type `signer` or `&signer`", + INIT_MODULE_FUN + ); + self.env.error(&fun.get_id_loc(), &msg); + }; + + if fun.get_parameter_count() != 1 { + record_param_mismatch_error(); + } else { + let Parameter(_, ty, _) = &fun.get_parameters()[0]; let ok = match ty { Type::Primitive(PrimitiveType::Signer) => true, - Type::Reference(_, ty) => matches!(*ty, Type::Primitive(PrimitiveType::Signer)), + Type::Reference(_, ty) => { + matches!(ty.as_ref(), Type::Primitive(PrimitiveType::Signer)) + }, _ => false, }; if !ok { - self.env.error( - &fun.get_id_loc(), - &format!("`{}` function can only take values of type `signer` or `&signer` as parameters", - INIT_MODULE_FUN), - ); + record_param_mismatch_error(); } } + if fun.get_return_count() > 0 { self.env.error( &fun.get_id_loc(), &format!("`{}` function cannot return values", INIT_MODULE_FUN), ) } + + if fun.get_type_parameter_count() > 0 { + self.env.error( + &fun.get_id_loc(), + &format!("`{}` function cannot have type parameters", INIT_MODULE_FUN), + ) + } } } } @@ -178,7 +199,7 @@ impl<'a> ExtendedChecker<'a> { // ---------------------------------------------------------------------------------- // Entry Functions -impl<'a> ExtendedChecker<'a> { +impl ExtendedChecker<'_> { fn check_entry_functions(&self, module: &ModuleEnv) { for ref fun in module.get_functions() { if !fun.is_entry() { @@ -190,7 +211,8 @@ impl<'a> ExtendedChecker<'a> { continue; } - self.check_transaction_args(&fun.get_id_loc(), &fun.get_parameters()); + self.check_transaction_args(&fun.get_parameters()); + self.check_signer_args(&fun.get_parameters()); if fun.get_return_count() > 0 { self.env .error(&fun.get_id_loc(), "entry function cannot return values") @@ -198,12 +220,34 @@ impl<'a> ExtendedChecker<'a> { } } - fn check_transaction_args(&self, _loc: &Loc, arg_tys: &[Parameter]) { + fn check_transaction_args(&self, arg_tys: &[Parameter]) { for Parameter(_sym, ty, param_loc) in arg_tys { self.check_transaction_input_type(param_loc, ty) } } + fn check_signer_args(&self, arg_tys: &[Parameter]) { + // All signer args should precede non-signer args, for an entry function to be + // used as an entry function. + let mut seen_non_signer = false; + for Parameter(_, ty, loc) in arg_tys { + // We assume `&mut signer` are disallowed by checks elsewhere, so it is okay + // for `skip_reference()` below to skip both kinds of reference. 
+ let ty_is_signer = ty.skip_reference().is_signer(); + if seen_non_signer && ty_is_signer { + self.env.warning( + loc, + "to be used as an entry function, all signers should precede non-signers", + ); + } + if !ty_is_signer { + seen_non_signer = true; + } + } + } + + /// Note: this should be kept up in sync with `is_valid_txn_arg` in + /// aptos-move/aptos-vm/src/verifier/transaction_arg_validation.rs fn check_transaction_input_type(&self, loc: &Loc, ty: &Type) { use Type::*; match ty { @@ -213,7 +257,7 @@ impl<'a> ExtendedChecker<'a> { Reference(ReferenceKind::Immutable, bt) if matches!(bt.as_ref(), Primitive(PrimitiveType::Signer)) => { - // Reference to signer allowed + // Immutable reference to signer allowed }, Vector(ety) => { // Vectors are allowed if element type is allowed @@ -252,7 +296,7 @@ impl<'a> ExtendedChecker<'a> { // ---------------------------------------------------------------------------------- // Resource Group Functions -impl<'a> ExtendedChecker<'a> { +impl ExtendedChecker<'_> { // A entry in a resource group should contain the resource group attribute and a parameter that // points to a resource group container. fn check_and_record_resource_group_members(&mut self, module: &ModuleEnv) { @@ -472,7 +516,7 @@ impl<'a> ExtendedChecker<'a> { // ---------------------------------------------------------------------------------- // Unbiasable entry functions -impl<'a> ExtendedChecker<'a> { +impl ExtendedChecker<'_> { fn check_and_record_unbiasabale_entry_functions(&mut self, module: &ModuleEnv) { for ref fun in module.get_functions() { let maybe_randomness_annotation = match self.get_randomness_max_gas_declaration(fun) { @@ -567,7 +611,7 @@ impl<'a> ExtendedChecker<'a> { // ---------------------------------------------------------------------------------- // Checks for unsafe usage of randomness -impl<'a> ExtendedChecker<'a> { +impl ExtendedChecker<'_> { /// Checks unsafe usage of the randomness feature for the given module. /// /// 1. Checks that no public function in the module calls randomness features. An @@ -650,13 +694,13 @@ impl<'a> ExtendedChecker<'a> { // ---------------------------------------------------------------------------------- // View Functions -impl<'a> ExtendedChecker<'a> { +impl ExtendedChecker<'_> { fn check_and_record_view_functions(&mut self, module: &ModuleEnv) { for ref fun in module.get_functions() { if !self.has_attribute(fun, VIEW_FUN_ATTRIBUTE) { continue; } - self.check_transaction_args(&fun.get_id_loc(), &fun.get_parameters()); + self.check_transaction_args(&fun.get_parameters()); if fun.get_return_count() == 0 { self.env .error(&fun.get_id_loc(), "`#[view]` function must return values") @@ -709,7 +753,7 @@ impl<'a> ExtendedChecker<'a> { // ---------------------------------------------------------------------------------- // Events -impl<'a> ExtendedChecker<'a> { +impl ExtendedChecker<'_> { fn check_and_record_events(&mut self, module: &ModuleEnv) { for ref struct_ in module.get_structs() { if self.has_attribute_iter(struct_.get_attributes().iter(), EVENT_STRUCT_ATTRIBUTE) { @@ -790,7 +834,7 @@ impl<'a> ExtendedChecker<'a> { // ---------------------------------------------------------------------------------- // Error Map -impl<'a> ExtendedChecker<'a> { +impl ExtendedChecker<'_> { fn build_error_map(&mut self, module: &ModuleEnv<'_>) { // Compute the error map, we are using the `ErrorMapping` type from Move which // is more general as we need as it works for multiple modules. 
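The `check_signer_args` warning added above encodes a simple ordering rule: for a function to be usable as an entry function, every `signer` or `&signer` parameter must precede all non-signer parameters. A standalone restatement of that rule, not the checker itself, is sketched below.

```rust
// Standalone restatement of the ordering rule behind the new `check_signer_args`
// warning: returns false exactly when a signer parameter follows a non-signer one.
fn signers_precede_non_signers(param_is_signer: &[bool]) -> bool {
    let mut seen_non_signer = false;
    for &is_signer in param_is_signer {
        if seen_non_signer && is_signer {
            // Corresponds to the case where the checker emits its warning.
            return false;
        }
        if !is_signer {
            seen_non_signer = true;
        }
    }
    true
}

#[cfg(test)]
mod signer_order_tests {
    use super::*;

    #[test]
    fn ordering_examples() {
        // entry fun f(s: &signer, amount: u64) -- accepted
        assert!(signers_precede_non_signers(&[true, false]));
        // entry fun g(amount: u64, s: &signer) -- triggers the warning
        assert!(!signers_precede_non_signers(&[false, true]));
    }
}
```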
@@ -884,68 +928,3 @@ impl<'a> ExtendedChecker<'a> { == AccountAddress::ONE } } - -// ---------------------------------------------------------------------------------- -// Resource Group Container Scope - -#[derive(Debug, Eq, PartialEq)] -pub enum ResourceGroupScope { - Global, - Address, - Module, -} - -impl ResourceGroupScope { - pub fn is_less_strict(&self, other: &ResourceGroupScope) -> bool { - match self { - ResourceGroupScope::Global => other != self, - ResourceGroupScope::Address => other == &ResourceGroupScope::Module, - ResourceGroupScope::Module => false, - } - } - - pub fn are_equal_envs(&self, resource: &StructEnv, group: &StructEnv) -> bool { - match self { - ResourceGroupScope::Global => true, - ResourceGroupScope::Address => { - resource.module_env.get_name().addr() == group.module_env.get_name().addr() - }, - ResourceGroupScope::Module => { - resource.module_env.get_name() == group.module_env.get_name() - }, - } - } - - pub fn are_equal_module_ids(&self, resource: &ModuleId, group: &ModuleId) -> bool { - match self { - ResourceGroupScope::Global => true, - ResourceGroupScope::Address => resource.address() == group.address(), - ResourceGroupScope::Module => resource == group, - } - } - - pub fn as_str(&self) -> &'static str { - match self { - ResourceGroupScope::Global => "global", - ResourceGroupScope::Address => "address", - ResourceGroupScope::Module => "module_", - } - } -} - -impl FromStr for ResourceGroupScope { - type Err = ResourceGroupScopeError; - - fn from_str(s: &str) -> Result { - match s { - "global" => Ok(ResourceGroupScope::Global), - "address" => Ok(ResourceGroupScope::Address), - "module_" => Ok(ResourceGroupScope::Module), - _ => Err(ResourceGroupScopeError(s.to_string())), - } - } -} - -#[derive(Debug, Error)] -#[error("Invalid resource group scope: {0}")] -pub struct ResourceGroupScopeError(String); diff --git a/aptos-move/framework/src/lib.rs b/aptos-move/framework/src/lib.rs index d9c7a338e48d4..69b17dd30ee8b 100644 --- a/aptos-move/framework/src/lib.rs +++ b/aptos-move/framework/src/lib.rs @@ -12,15 +12,12 @@ use std::io::{Read, Write}; mod built_package; pub use built_package::*; -mod module_metadata; -pub use module_metadata::*; - pub mod natives; mod release_builder; pub use release_builder::*; +pub mod chunked_publish; pub mod docgen; pub mod extended_checks; -pub use extended_checks::ResourceGroupScope; pub mod prover; mod release_bundle; mod released_framework; diff --git a/aptos-move/framework/src/module_metadata.rs b/aptos-move/framework/src/module_metadata.rs deleted file mode 100644 index 7736d60c9bf84..0000000000000 --- a/aptos-move/framework/src/module_metadata.rs +++ /dev/null @@ -1,694 +0,0 @@ -// Copyright © Aptos Foundation -// SPDX-License-Identifier: Apache-2.0 - -use crate::extended_checks::ResourceGroupScope; -use aptos_types::{ - on_chain_config::{FeatureFlag, Features, TimedFeatures}, - transaction::AbortInfo, -}; -use lru::LruCache; -use move_binary_format::{ - access::ModuleAccess, - file_format::{ - Ability, AbilitySet, CompiledScript, IdentifierIndex, SignatureToken, - StructFieldInformation, TableIndex, - }, - normalized::{Function, Struct}, - CompiledModule, -}; -use move_core_types::{ - errmap::ErrorDescription, - identifier::Identifier, - language_storage::{ModuleId, StructTag}, - metadata::Metadata, -}; -use move_model::metadata::{CompilationMetadata, COMPILATION_METADATA_KEY}; -use move_vm_runtime::move_vm::MoveVM; -use serde::{Deserialize, Serialize}; -use std::{cell::RefCell, collections::BTreeMap, env, sync::Arc}; 
-use thiserror::Error; - -/// The minimal file format version from which the V1 metadata is supported -pub const METADATA_V1_MIN_FILE_FORMAT_VERSION: u32 = 6; - -// For measuring complexity of a CompiledModule w.r.t. to metadata evaluation. -// This is for the size of types. -/// Cost of one node in a type. -const NODE_COST: usize = 10; -/// Cost of one character in the name of struct referred from a type node. -const IDENT_CHAR_COST: usize = 1; -/// Overall budget for module complexity, calibrated via tests -const COMPLEXITY_BUDGET: usize = 200000000; - -/// The keys used to identify the metadata in the metadata section of the module bytecode. -/// This is more or less arbitrary, besides we should use some unique key to identify -/// Aptos specific metadata (`aptos::` here). -pub static APTOS_METADATA_KEY: &[u8] = "aptos::metadata_v0".as_bytes(); -pub static APTOS_METADATA_KEY_V1: &[u8] = "aptos::metadata_v1".as_bytes(); - -/// Aptos specific metadata attached to the metadata section of file_format. -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct RuntimeModuleMetadata { - /// The error map containing the description of error reasons as grabbed from the source. - /// These are typically only a few entries so no relevant size difference. - pub error_map: BTreeMap, -} - -/// V1 of Aptos specific metadata attached to the metadata section of file_format. -#[derive(Debug, Clone, Serialize, Deserialize, Default)] -pub struct RuntimeModuleMetadataV1 { - /// The error map containing the description of error reasons as grabbed from the source. - /// These are typically only a few entries so no relevant size difference. - pub error_map: BTreeMap, - - /// Attributes attached to structs. - pub struct_attributes: BTreeMap>, - - /// Attributes attached to functions, by definition index. - pub fun_attributes: BTreeMap>, -} - -/// Enumeration of potentially known attributes -#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)] -pub struct KnownAttribute { - kind: u8, - args: Vec, -} - -/// Enumeration of known attributes -#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)] -pub enum KnownAttributeKind { - // An older compiler placed view functions at 0. This was then published to - // Testnet, and now we need to recognize this as a legacy index. 
- LegacyViewFunction = 0, - ViewFunction = 1, - ResourceGroup = 2, - ResourceGroupMember = 3, - Event = 4, - Randomness = 5, -} - -impl KnownAttribute { - pub fn view_function() -> Self { - Self { - kind: KnownAttributeKind::ViewFunction as u8, - args: vec![], - } - } - - pub fn is_view_function(&self) -> bool { - self.kind == (KnownAttributeKind::LegacyViewFunction as u8) - || self.kind == (KnownAttributeKind::ViewFunction as u8) - } - - pub fn resource_group(scope: ResourceGroupScope) -> Self { - Self { - kind: KnownAttributeKind::ResourceGroup as u8, - args: vec![scope.as_str().to_string()], - } - } - - pub fn is_resource_group(&self) -> bool { - self.kind == KnownAttributeKind::ResourceGroup as u8 - } - - pub fn get_resource_group(&self) -> Option { - if self.kind == KnownAttributeKind::ResourceGroup as u8 { - self.args.first().and_then(|scope| str::parse(scope).ok()) - } else { - None - } - } - - pub fn resource_group_member(container: String) -> Self { - Self { - kind: KnownAttributeKind::ResourceGroupMember as u8, - args: vec![container], - } - } - - pub fn get_resource_group_member(&self) -> Option { - if self.kind == KnownAttributeKind::ResourceGroupMember as u8 { - self.args.first()?.parse().ok() - } else { - None - } - } - - pub fn is_resource_group_member(&self) -> bool { - self.kind == KnownAttributeKind::ResourceGroupMember as u8 - } - - pub fn event() -> Self { - Self { - kind: KnownAttributeKind::Event as u8, - args: vec![], - } - } - - pub fn is_event(&self) -> bool { - self.kind == KnownAttributeKind::Event as u8 - } - - pub fn randomness(claimed_gas: Option) -> Self { - Self { - kind: KnownAttributeKind::Randomness as u8, - args: if let Some(amount) = claimed_gas { - vec![amount.to_string()] - } else { - vec![] - }, - } - } - - pub fn is_randomness(&self) -> bool { - self.kind == KnownAttributeKind::Randomness as u8 - } - - pub fn try_as_randomness_annotation(&self) -> Option { - if self.kind == KnownAttributeKind::Randomness as u8 { - if let Some(arg) = self.args.first() { - let max_gas = arg.parse::().ok(); - Some(RandomnessAnnotation::new(max_gas)) - } else { - Some(RandomnessAnnotation::default()) - } - } else { - None - } - } -} - -const METADATA_CACHE_SIZE: usize = 1024; - -thread_local! 
{ - static V1_METADATA_CACHE: RefCell, Option>>> = RefCell::new(LruCache::new(METADATA_CACHE_SIZE)); - - static V0_METADATA_CACHE: RefCell, Option>>> = RefCell::new(LruCache::new(METADATA_CACHE_SIZE)); -} - -/// Extract metadata from the VM, upgrading V0 to V1 representation as needed -pub fn get_metadata(md: &[Metadata]) -> Option> { - if let Some(data) = md.iter().find(|md| md.key == APTOS_METADATA_KEY_V1) { - V1_METADATA_CACHE.with(|ref_cell| { - let mut cache = ref_cell.borrow_mut(); - if let Some(meta) = cache.get(&data.value) { - meta.clone() - } else { - let meta = bcs::from_bytes::(&data.value) - .ok() - .map(Arc::new); - cache.put(data.value.clone(), meta.clone()); - meta - } - }) - } else { - get_metadata_v0(md) - } -} - -pub fn get_metadata_v0(md: &[Metadata]) -> Option> { - if let Some(data) = md.iter().find(|md| md.key == APTOS_METADATA_KEY) { - V0_METADATA_CACHE.with(|ref_cell| { - let mut cache = ref_cell.borrow_mut(); - if let Some(meta) = cache.get(&data.value) { - meta.clone() - } else { - let meta = bcs::from_bytes::(&data.value) - .ok() - .map(RuntimeModuleMetadata::upgrade) - .map(Arc::new); - cache.put(data.value.clone(), meta.clone()); - meta - } - }) - } else { - None - } -} - -/// Extract metadata from the VM, upgrading V0 to V1 representation as needed -pub fn get_vm_metadata(vm: &MoveVM, module_id: &ModuleId) -> Option> { - vm.with_module_metadata(module_id, get_metadata) -} - -/// Extract metadata from the VM, legacy V0 format upgraded to V1 -pub fn get_vm_metadata_v0( - vm: &MoveVM, - module_id: &ModuleId, -) -> Option> { - vm.with_module_metadata(module_id, get_metadata_v0) -} - -/// Check if the metadata has unknown key/data types -pub fn check_metadata_format( - module: &CompiledModule, - features: &Features, -) -> Result<(), MalformedError> { - let mut exist = false; - let mut compilation_key_exist = false; - for data in module.metadata.iter() { - if data.key == *APTOS_METADATA_KEY || data.key == *APTOS_METADATA_KEY_V1 { - if exist { - return Err(MalformedError::DuplicateKey); - } - exist = true; - - if data.key == *APTOS_METADATA_KEY { - bcs::from_bytes::(&data.value) - .map_err(|e| MalformedError::DeserializedError(data.key.clone(), e))?; - } else if data.key == *APTOS_METADATA_KEY_V1 { - bcs::from_bytes::(&data.value) - .map_err(|e| MalformedError::DeserializedError(data.key.clone(), e))?; - } - } else if features.is_enabled(FeatureFlag::REJECT_UNSTABLE_BYTECODE) - && data.key == *COMPILATION_METADATA_KEY - { - if compilation_key_exist { - return Err(MalformedError::DuplicateKey); - } - compilation_key_exist = true; - bcs::from_bytes::(&data.value) - .map_err(|e| MalformedError::DeserializedError(data.key.clone(), e))?; - } else { - return Err(MalformedError::UnknownKey(data.key.clone())); - } - } - - Ok(()) -} - -/// Extract metadata from a compiled module, upgrading V0 to V1 representation as needed. -pub fn get_metadata_from_compiled_module( - module: &CompiledModule, -) -> Option { - if let Some(data) = find_metadata(module, APTOS_METADATA_KEY_V1) { - let mut metadata = bcs::from_bytes::(&data.value).ok(); - // Clear out metadata for v5, since it shouldn't have existed in the first place and isn't - // being used. Note, this should have been gated in the verify module metadata. 
- if module.version == 5 { - if let Some(metadata) = metadata.as_mut() { - metadata.struct_attributes.clear(); - metadata.fun_attributes.clear(); - } - } - metadata - } else if let Some(data) = find_metadata(module, APTOS_METADATA_KEY) { - // Old format available, upgrade to new one on the fly - let data_v0 = bcs::from_bytes::(&data.value).ok()?; - Some(data_v0.upgrade()) - } else { - None - } -} - -/// Extract compilation metadata from a compiled module -pub fn get_compilation_metadata_from_compiled_module( - module: &CompiledModule, -) -> Option { - if let Some(data) = find_metadata(module, COMPILATION_METADATA_KEY) { - bcs::from_bytes::(&data.value).ok() - } else { - None - } -} - -// This is mostly a copy paste of the existing function -// get_metadata_from_compiled_module. In the API types there is a unifying trait for -// modules and scripts called Bytecode that could help eliminate this duplication, -// since all we need is a common way to access the metadata, but we'd have to move -// that trait outside of the API types and into somewhere more reasonable for the -// framework to access. There is currently no other trait that both CompiledModule -// and CompiledScript implement. This stands as a future improvement, if we end -// up needing more functions that work similarly for both of these types.. -// -/// Extract metadata from a compiled module, upgrading V0 to V1 representation as needed. -pub fn get_metadata_from_compiled_script( - script: &CompiledScript, -) -> Option { - if let Some(data) = find_metadata_in_script(script, APTOS_METADATA_KEY_V1) { - let mut metadata = bcs::from_bytes::(&data.value).ok(); - // Clear out metadata for v5, since it shouldn't have existed in the first place and isn't - // being used. Note, this should have been gated in the verify module metadata. 
- if script.version == 5 { - if let Some(metadata) = metadata.as_mut() { - metadata.struct_attributes.clear(); - metadata.fun_attributes.clear(); - } - } - metadata - } else if let Some(data) = find_metadata_in_script(script, APTOS_METADATA_KEY) { - // Old format available, upgrade to new one on the fly - let data_v0 = bcs::from_bytes::(&data.value).ok()?; - Some(data_v0.upgrade()) - } else { - None - } -} - -#[derive(Clone, Debug, PartialEq, Eq, Error)] -pub enum MetaDataValidationError { - #[error(transparent)] - Malformed(MalformedError), - #[error(transparent)] - InvalidAttribute(AttributeValidationError), -} - -impl From for MetaDataValidationError { - fn from(value: MalformedError) -> Self { - MetaDataValidationError::Malformed(value) - } -} - -impl From for MetaDataValidationError { - fn from(value: AttributeValidationError) -> Self { - MetaDataValidationError::InvalidAttribute(value) - } -} - -#[derive(Clone, Debug, PartialEq, Eq, Error)] -pub enum MalformedError { - #[error("Unknown key found: {0:?}")] - UnknownKey(Vec), - #[error("Unable to deserialize value for {0:?}: {1}")] - DeserializedError(Vec, bcs::Error), - #[error("Duplicate key for metadata")] - DuplicateKey, - #[error("Module too complex")] - ModuleTooComplex, - #[error("Index out of range")] - IndexOutOfRange, -} - -#[derive(Clone, Debug, PartialEq, Eq, Error)] -#[error("Unknown attribute ({}) for key: {}", self.attribute, self.key)] -pub struct AttributeValidationError { - pub key: String, - pub attribute: u8, -} - -pub fn is_valid_unbiasable_function( - functions: &BTreeMap, - fun: &str, -) -> Result<(), AttributeValidationError> { - if let Ok(ident_fun) = Identifier::new(fun) { - if let Some(f) = functions.get(&ident_fun) { - if f.is_entry && !f.visibility.is_public() { - return Ok(()); - } - } - } - - Err(AttributeValidationError { - key: fun.to_string(), - attribute: KnownAttributeKind::Randomness as u8, - }) -} - -pub fn is_valid_view_function( - functions: &BTreeMap, - fun: &str, -) -> Result<(), AttributeValidationError> { - if let Ok(ident_fun) = Identifier::new(fun) { - if let Some(mod_fun) = functions.get(&ident_fun) { - if !mod_fun.return_.is_empty() { - return Ok(()); - } - } - } - - Err(AttributeValidationError { - key: fun.to_string(), - attribute: KnownAttributeKind::ViewFunction as u8, - }) -} - -pub fn is_valid_resource_group( - structs: &BTreeMap, - struct_: &str, -) -> Result<(), AttributeValidationError> { - if let Ok(ident_struct) = Identifier::new(struct_) { - if let Some(mod_struct) = structs.get(&ident_struct) { - if mod_struct.abilities == AbilitySet::EMPTY - && mod_struct.type_parameters.is_empty() - && mod_struct.fields.len() == 1 - { - return Ok(()); - } - } - } - - Err(AttributeValidationError { - key: struct_.to_string(), - attribute: KnownAttributeKind::ViewFunction as u8, - }) -} - -pub fn is_valid_resource_group_member( - structs: &BTreeMap, - struct_: &str, -) -> Result<(), AttributeValidationError> { - if let Ok(ident_struct) = Identifier::new(struct_) { - if let Some(mod_struct) = structs.get(&ident_struct) { - if mod_struct.abilities.has_ability(Ability::Key) { - return Ok(()); - } - } - } - - Err(AttributeValidationError { - key: struct_.to_string(), - attribute: KnownAttributeKind::ViewFunction as u8, - }) -} - -pub fn verify_module_metadata( - module: &CompiledModule, - features: &Features, - _timed_features: &TimedFeatures, -) -> Result<(), MetaDataValidationError> { - if features.is_enabled(FeatureFlag::SAFER_METADATA) { - check_module_complexity(module)?; - } - - if 
features.are_resource_groups_enabled() { - check_metadata_format(module, features)?; - } - let metadata = if let Some(metadata) = get_metadata_from_compiled_module(module) { - metadata - } else { - return Ok(()); - }; - - let functions = module - .function_defs - .iter() - .map(|func_def| Function::new(module, func_def)) - .collect::>(); - - for (fun, attrs) in &metadata.fun_attributes { - for attr in attrs { - if attr.is_view_function() { - is_valid_view_function(&functions, fun)?; - } else if attr.is_randomness() { - is_valid_unbiasable_function(&functions, fun)?; - } else { - return Err(AttributeValidationError { - key: fun.clone(), - attribute: attr.kind, - } - .into()); - } - } - } - - let structs = module - .struct_defs - .iter() - .map(|d| Struct::new(module, d)) - .collect::>(); - - for (struct_, attrs) in &metadata.struct_attributes { - for attr in attrs { - if features.are_resource_groups_enabled() { - if attr.is_resource_group() && attr.get_resource_group().is_some() { - is_valid_resource_group(&structs, struct_)?; - continue; - } else if attr.is_resource_group_member() - && attr.get_resource_group_member().is_some() - { - is_valid_resource_group_member(&structs, struct_)?; - continue; - } - } - if features.is_module_event_enabled() && attr.is_event() { - continue; - } - return Err(AttributeValidationError { - key: struct_.clone(), - attribute: attr.kind, - } - .into()); - } - } - Ok(()) -} - -fn find_metadata<'a>(module: &'a CompiledModule, key: &[u8]) -> Option<&'a Metadata> { - module.metadata.iter().find(|md| md.key == key) -} - -fn find_metadata_in_script<'a>(script: &'a CompiledScript, key: &[u8]) -> Option<&'a Metadata> { - script.metadata.iter().find(|md| md.key == key) -} - -impl RuntimeModuleMetadata { - pub fn upgrade(self) -> RuntimeModuleMetadataV1 { - RuntimeModuleMetadataV1 { - error_map: self.error_map, - ..RuntimeModuleMetadataV1::default() - } - } -} - -impl RuntimeModuleMetadataV1 { - pub fn downgrade(self) -> RuntimeModuleMetadata { - RuntimeModuleMetadata { - error_map: self.error_map, - } - } -} - -impl RuntimeModuleMetadataV1 { - pub fn is_empty(&self) -> bool { - self.error_map.is_empty() - && self.fun_attributes.is_empty() - && self.struct_attributes.is_empty() - } - - pub fn extract_abort_info(&self, code: u64) -> Option { - self.error_map - .get(&(code & 0xFFF)) - .or_else(|| self.error_map.get(&code)) - .map(|descr| AbortInfo { - reason_name: descr.code_name.clone(), - description: descr.code_description.clone(), - }) - } -} - -/// Checks the complexity of a module. -fn check_module_complexity(module: &CompiledModule) -> Result<(), MetaDataValidationError> { - let mut meter: usize = 0; - for sig in module.signatures() { - for tok in &sig.0 { - check_sigtok_complexity(module, &mut meter, tok)? - } - } - for handle in module.function_handles() { - check_ident_complexity(module, &mut meter, handle.name)?; - for tok in &safe_get_table(module.signatures(), handle.parameters.0)?.0 { - check_sigtok_complexity(module, &mut meter, tok)? - } - for tok in &safe_get_table(module.signatures(), handle.return_.0)?.0 { - check_sigtok_complexity(module, &mut meter, tok)? - } - } - for handle in module.struct_handles() { - check_ident_complexity(module, &mut meter, handle.name)?; - } - for def in module.struct_defs() { - if let StructFieldInformation::Declared(fields) = &def.field_information { - for field in fields { - check_ident_complexity(module, &mut meter, field.name)?; - check_sigtok_complexity(module, &mut meter, &field.signature.0)? 
- } - } - } - for def in module.function_defs() { - if let Some(unit) = &def.code { - for tok in &safe_get_table(module.signatures(), unit.locals.0)?.0 { - check_sigtok_complexity(module, &mut meter, tok)? - } - } - } - Ok(()) -} - -// Iterate -- without recursion -- through the nodes of a signature token. Any sub-nodes are -// dealt with via the iterator -fn check_sigtok_complexity( - module: &CompiledModule, - meter: &mut usize, - tok: &SignatureToken, -) -> Result<(), MetaDataValidationError> { - for node in tok.preorder_traversal() { - // Count the node. - *meter = meter.saturating_add(NODE_COST); - match node { - SignatureToken::Struct(idx) | SignatureToken::StructInstantiation(idx, _) => { - let shandle = safe_get_table(module.struct_handles(), idx.0)?; - let mhandle = safe_get_table(module.module_handles(), shandle.module.0)?; - // Count identifier sizes - check_ident_complexity(module, meter, shandle.name)?; - check_ident_complexity(module, meter, mhandle.name)? - }, - _ => {}, - } - check_budget(*meter)? - } - Ok(()) -} - -fn check_ident_complexity( - module: &CompiledModule, - meter: &mut usize, - idx: IdentifierIndex, -) -> Result<(), MetaDataValidationError> { - *meter = meter.saturating_add( - safe_get_table(module.identifiers(), idx.0)? - .len() - .saturating_mul(IDENT_CHAR_COST), - ); - check_budget(*meter) -} - -fn safe_get_table(table: &[A], idx: TableIndex) -> Result<&A, MetaDataValidationError> { - let idx = idx as usize; - if idx < table.len() { - Ok(&table[idx]) - } else { - Err(MetaDataValidationError::Malformed( - MalformedError::IndexOutOfRange, - )) - } -} - -fn check_budget(meter: usize) -> Result<(), MetaDataValidationError> { - let mut budget = COMPLEXITY_BUDGET; - if cfg!(feature = "testing") { - if let Ok(b) = env::var("METADATA_BUDGET_CAL") { - budget = b.parse::().unwrap() - } - } - if meter > budget { - Err(MetaDataValidationError::Malformed( - MalformedError::ModuleTooComplex, - )) - } else { - Ok(()) - } -} - -/// The randomness consuming options specified by developers for their entry function. -/// Examples: `#[randomness(max_gas = 99999)]`, `#[randomness]`. -#[derive(Default)] -pub struct RandomnessAnnotation { - pub max_gas: Option, -} - -impl RandomnessAnnotation { - pub fn new(max_gas: Option) -> Self { - Self { max_gas } - } -} diff --git a/aptos-move/framework/src/natives/account_abstraction.rs b/aptos-move/framework/src/natives/account_abstraction.rs new file mode 100644 index 0000000000000..7a14c5a3904a5 --- /dev/null +++ b/aptos-move/framework/src/natives/account_abstraction.rs @@ -0,0 +1,58 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use super::function_info::extract_function_info; +use aptos_gas_schedule::gas_params::natives::aptos_framework::DISPATCHABLE_AUTHENTICATE_DISPATCH_BASE; +use aptos_native_interface::{ + RawSafeNative, SafeNativeBuilder, SafeNativeContext, SafeNativeError, SafeNativeResult, +}; +use move_vm_runtime::native_functions::NativeFunction; +use move_vm_types::{loaded_data::runtime_types::Type, values::Value}; +use smallvec::SmallVec; +use std::collections::VecDeque; + +/*************************************************************************************************** + * native fun dispatchable_authenticate + * + * Directs control flow based on the last argument. We use the same native function implementation + * for all dispatching native. + * gas cost: a flat fee because we charged the loading of those modules previously. 
+ * + **************************************************************************************************/ +pub(crate) fn native_dispatch( + context: &mut SafeNativeContext, + ty_args: Vec, + mut arguments: VecDeque, +) -> SafeNativeResult> { + let (module_name, func_name) = extract_function_info(&mut arguments)?; + + // Check that the module is already properly charged in this transaction. + context + .traversal_context() + .check_is_special_or_visited(module_name.address(), module_name.name()) + .map_err(|_| SafeNativeError::Abort { abort_code: 4 })?; + + // Use Error to instruct the VM to perform a function call dispatch. + Err(SafeNativeError::FunctionDispatch { + cost: context.eval_gas(DISPATCHABLE_AUTHENTICATE_DISPATCH_BASE), + module_name, + func_name, + ty_args, + args: arguments.into_iter().collect(), + }) +} + +/*************************************************************************************************** + * module + * + **************************************************************************************************/ +pub fn make_all( + builder: &SafeNativeBuilder, +) -> impl Iterator + '_ { + let natives = [( + "dispatchable_authenticate", + native_dispatch as RawSafeNative, + )]; + + builder.make_named_natives(natives) +} diff --git a/aptos-move/framework/src/natives/aggregator_natives/aggregator_v2.rs b/aptos-move/framework/src/natives/aggregator_natives/aggregator_v2.rs index 521d62bd6ae22..693ebdf9d71cc 100644 --- a/aptos-move/framework/src/natives/aggregator_natives/aggregator_v2.rs +++ b/aptos-move/framework/src/natives/aggregator_natives/aggregator_v2.rs @@ -6,7 +6,6 @@ use aptos_aggregator::{ bounded_math::{BoundedMath, SignedU128}, delayed_field_extension::DelayedFieldData, resolver::DelayedFieldResolver, - types::code_invariant_error, }; use aptos_gas_algebra::NumBytes; use aptos_gas_schedule::gas_params::natives::aptos_framework::*; @@ -14,9 +13,12 @@ use aptos_native_interface::{ safely_pop_arg, RawSafeNative, SafeNativeBuilder, SafeNativeContext, SafeNativeError, SafeNativeResult, }; -use aptos_types::delayed_fields::{ - calculate_width_for_constant_string, calculate_width_for_integer_embedded_string, - SnapshotToStringFormula, +use aptos_types::{ + delayed_fields::{ + calculate_width_for_constant_string, calculate_width_for_integer_embedded_string, + SnapshotToStringFormula, + }, + error::code_invariant_error, }; use move_binary_format::errors::PartialVMError; use move_vm_runtime::native_functions::NativeFunction; @@ -83,9 +85,7 @@ fn create_value_by_type( ) -> SafeNativeResult { match value_ty { Type::U128 => Ok(Value::u128(value)), - Type::U64 => Ok(Value::u64( - u128_to_u64(value).map_err(PartialVMError::from)?, - )), + Type::U64 => Ok(Value::u64(u128_to_u64(value)?)), _ => Err(SafeNativeError::Abort { abort_code: error_code_if_incorrect, }), @@ -97,7 +97,7 @@ fn create_string_value(value: Vec) -> Value { } fn get_context_data<'t, 'b>( - context: &'t mut SafeNativeContext<'_, 'b, '_, '_>, + context: &'t mut SafeNativeContext<'_, 'b, '_>, ) -> Option<(&'b dyn DelayedFieldResolver, RefMut<'t, DelayedFieldData>)> { let aggregator_context = context.extensions().get::(); if aggregator_context.delayed_field_optimization_enabled { @@ -552,7 +552,7 @@ fn native_derive_string_concat( if prefix .len() .checked_add(suffix.len()) - .map_or(false, |v| v > DERIVED_STRING_INPUT_MAX_LENGTH) + .is_some_and(|v| v > DERIVED_STRING_INPUT_MAX_LENGTH) { return Err(SafeNativeError::Abort { abort_code: EINPUT_STRING_LENGTH_TOO_LARGE, @@ -577,7 +577,7 @@ fn 
native_derive_string_concat( let snapshot_value = get_snapshot_value(&snapshot, snapshot_value_ty)?; let output = SnapshotToStringFormula::Concat { prefix, suffix }.apply_to(snapshot_value); - bytes_and_width_to_derived_string_struct(output, width).map_err(PartialVMError::from)? + bytes_and_width_to_derived_string_struct(output, width)? }; Ok(smallvec![derived_string_snapshot]) diff --git a/aptos-move/framework/src/natives/aggregator_natives/context.rs b/aptos-move/framework/src/natives/aggregator_natives/context.rs index 4e536394e42e3..4193f4538b9a7 100644 --- a/aptos-move/framework/src/natives/aggregator_natives/context.rs +++ b/aptos-move/framework/src/natives/aggregator_natives/context.rs @@ -414,7 +414,7 @@ mod test { ); let derived_width = assert_ok!(calculate_width_for_integer_embedded_string( - "prefixsuffix".as_bytes().len(), + "prefixsuffix".len(), id_from_fake_idx(0, 8) )) as u32; @@ -500,7 +500,7 @@ mod test { ); let derived_width = assert_ok!(calculate_width_for_integer_embedded_string( - "prefixsuffix".as_bytes().len(), + "prefixsuffix".len(), id_from_fake_idx(0, 8) )) as u32; diff --git a/aptos-move/framework/src/natives/aggregator_natives/helpers_v1.rs b/aptos-move/framework/src/natives/aggregator_natives/helpers_v1.rs index 99d1211a5ea10..a633a3df3d9c8 100644 --- a/aptos-move/framework/src/natives/aggregator_natives/helpers_v1.rs +++ b/aptos-move/framework/src/natives/aggregator_natives/helpers_v1.rs @@ -56,19 +56,17 @@ pub(crate) fn unpack_aggregator_struct( let pop_with_err = |vec: &mut Vec, msg: &str| { vec.pop() - .map_or(Err(extension_error(msg)), |v| v.value_as::()) + .map_or_else(|| Err(extension_error(msg)), |v| v.value_as::()) }; let limit = pop_with_err(&mut fields, "unable to pop 'limit' field")?; - let key = fields - .pop() - .map_or(Err(extension_error("unable to pop `handle` field")), |v| { - v.value_as::() - })?; - let handle = fields - .pop() - .map_or(Err(extension_error("unable to pop `handle` field")), |v| { - v.value_as::() - })?; + let key = fields.pop().map_or_else( + || Err(extension_error("unable to pop `handle` field")), + |v| v.value_as::(), + )?; + let handle = fields.pop().map_or_else( + || Err(extension_error("unable to pop `handle` field")), + |v| v.value_as::(), + )?; Ok((TableHandle(handle), key, limit)) } diff --git a/aptos-move/framework/src/natives/code.rs b/aptos-move/framework/src/natives/code.rs index 5ecd4b04fb465..b316ca971bf58 100644 --- a/aptos-move/framework/src/natives/code.rs +++ b/aptos-move/framework/src/natives/code.rs @@ -192,11 +192,34 @@ const EALREADY_REQUESTED: u64 = 0x03_0000; const ARBITRARY_POLICY: u8 = 0; /// The native code context. -#[derive(Tid, Default)] +#[derive(Tid)] pub struct NativeCodeContext { - /// Remembers whether the publishing of a module bundle was requested during transaction - /// execution. - pub requested_module_bundle: Option, + /// If false, publish requests are ignored and any attempts to publish code result in runtime + /// errors. + enabled: bool, + /// Possibly stores (if not [None]) the request to publish a module bundle. The request is made + /// using the native code defined in this context. It is later extracted by the VM for further + /// checks and processing the actual publish. 
+ requested_module_bundle: Option, +} + +impl NativeCodeContext { + #[allow(clippy::new_without_default)] + pub fn new() -> Self { + Self { + enabled: true, + requested_module_bundle: None, + } + } + + pub fn extract_publish_request(&mut self) -> Option { + if !self.enabled { + return None; + } + + self.enabled = false; + self.requested_module_bundle.take() + } } /// Represents a request for code publishing made from a native call and to be processed @@ -316,8 +339,8 @@ fn native_request_publish( }); let code_context = context.extensions_mut().get_mut::(); - if code_context.requested_module_bundle.is_some() { - // Can't request second time. + if code_context.requested_module_bundle.is_some() || !code_context.enabled { + // Can't request second time or if publish requests are not allowed. return Err(SafeNativeError::Abort { abort_code: EALREADY_REQUESTED, }); @@ -329,7 +352,7 @@ fn native_request_publish( allowed_deps, check_compat: policy != ARBITRARY_POLICY, }); - // TODO(Gas): charge gas for requesting code load (charge for actual code loading done elsewhere) + Ok(smallvec![]) } diff --git a/aptos-move/framework/src/natives/create_signer.rs b/aptos-move/framework/src/natives/create_signer.rs index c8605088113da..6ba6f79b578d9 100644 --- a/aptos-move/framework/src/natives/create_signer.rs +++ b/aptos-move/framework/src/natives/create_signer.rs @@ -28,7 +28,7 @@ pub(crate) fn native_create_signer( context.charge(ACCOUNT_CREATE_SIGNER_BASE)?; let address = safely_pop_arg!(arguments, AccountAddress); - Ok(smallvec![Value::signer(address)]) + Ok(smallvec![Value::master_signer(address)]) } /*************************************************************************************************** diff --git a/aptos-move/framework/src/natives/cryptography/algebra/arithmetics/scalar_mul.rs b/aptos-move/framework/src/natives/cryptography/algebra/arithmetics/scalar_mul.rs index 53ebdd89b17fd..28b52445c6f4c 100644 --- a/aptos-move/framework/src/natives/cryptography/algebra/arithmetics/scalar_mul.rs +++ b/aptos-move/framework/src/natives/cryptography/algebra/arithmetics/scalar_mul.rs @@ -81,7 +81,7 @@ macro_rules! ark_msm_bigint_wnaf_cost { ($cost_add:expr, $cost_double:expr, $num_entries:expr $(,)?) 
=> {{ let num_entries: usize = $num_entries; let window_size = ark_msm_window_size(num_entries); - let num_windows = (255 + window_size - 1) / window_size; + let num_windows = 255_usize.div_ceil(window_size); let num_buckets = 1_usize << window_size; $cost_add * NumArgs::from(((num_entries + num_buckets + 1) * num_windows) as u64) + $cost_double * NumArgs::from((num_buckets * num_windows) as u64) diff --git a/aptos-move/framework/src/natives/cryptography/algebra/mod.rs b/aptos-move/framework/src/natives/cryptography/algebra/mod.rs index 6a65be4c4e7eb..8de629b0d5e28 100644 --- a/aptos-move/framework/src/natives/cryptography/algebra/mod.rs +++ b/aptos-move/framework/src/natives/cryptography/algebra/mod.rs @@ -70,7 +70,7 @@ impl TryFrom for Structure { type Error = (); fn try_from(value: TypeTag) -> Result { - match value.to_string().as_str() { + match value.to_canonical_string().as_str() { "0x1::bls12381_algebra::Fr" => Ok(Structure::BLS12381Fr), "0x1::bls12381_algebra::Fq12" => Ok(Structure::BLS12381Fq12), "0x1::bls12381_algebra::G1" => Ok(Structure::BLS12381G1), @@ -124,7 +124,7 @@ impl TryFrom for SerializationFormat { type Error = (); fn try_from(value: TypeTag) -> Result { - match value.to_string().as_str() { + match value.to_canonical_string().as_str() { "0x1::bls12381_algebra::FormatFq12LscLsb" => { Ok(SerializationFormat::BLS12381Fq12LscLsb) }, @@ -166,7 +166,7 @@ impl TryFrom for HashToStructureSuite { type Error = (); fn try_from(value: TypeTag) -> Result { - match value.to_string().as_str() { + match value.to_canonical_string().as_str() { "0x1::bls12381_algebra::HashG1XmdSha256SswuRo" => { Ok(HashToStructureSuite::Bls12381g1XmdSha256SswuRo) }, diff --git a/aptos-move/framework/src/natives/cryptography/algebra/serialization.rs b/aptos-move/framework/src/natives/cryptography/algebra/serialization.rs index 83cf9ee34a9b9..19a6457f30aa1 100644 --- a/aptos-move/framework/src/natives/cryptography/algebra/serialization.rs +++ b/aptos-move/framework/src/natives/cryptography/algebra/serialization.rs @@ -10,7 +10,9 @@ use crate::{ }, safe_borrow_element, store_element, structure_from_ty_arg, }; -use aptos_gas_schedule::gas_params::natives::aptos_framework::*; +use aptos_gas_schedule::{ + gas_feature_versions::RELEASE_V1_16, gas_params::natives::aptos_framework::*, +}; use aptos_native_interface::{ safely_pop_arg, SafeNativeContext, SafeNativeError, SafeNativeResult, }; @@ -73,7 +75,7 @@ macro_rules! serialize_element { $structure_to_match:expr, $format_to_match:expr, [$(($field_structure:pat, $field_format:pat, $field_ty:ty, $field_serialization_func:ident,$reverse:expr, $field_serialization_gas:expr)),* $(,)?], - [$(($curve_structure:pat,$curve_format:pat, $curve_ty:ty, $curve_serialization_func:ident, $curve_serialization_gas:expr)),* $(,)?] + [$(($curve_structure:pat,$curve_format:pat, $curve_ty:ty, $curve_serialization_func:ident, $curve_serialization_gas:expr, $into_affine_gas:expr)),* $(,)?] ) => { match ($structure_to_match, $format_to_match) { $( @@ -101,6 +103,9 @@ macro_rules! 
serialize_element { element_ptr, element ); + if $context.gas_feature_version() >= RELEASE_V1_16 { + $context.charge($into_affine_gas)?; + } let element_affine = element.into_affine(); let mut buf = Vec::new(); $context.charge($curve_serialization_gas)?; @@ -220,56 +225,64 @@ pub fn serialize_internal( SerializationFormat::BLS12381G1Uncompressed, ark_bls12_381::G1Projective, serialize_uncompressed, - ALGEBRA_ARK_BLS12_381_G1_AFFINE_SERIALIZE_UNCOMP + ALGEBRA_ARK_BLS12_381_G1_AFFINE_SERIALIZE_UNCOMP, + ALGEBRA_ARK_BLS12_381_G1_PROJ_TO_AFFINE ), ( Structure::BLS12381G1, SerializationFormat::BLS12381G1Compressed, ark_bls12_381::G1Projective, serialize_compressed, - ALGEBRA_ARK_BLS12_381_G1_AFFINE_SERIALIZE_COMP + ALGEBRA_ARK_BLS12_381_G1_AFFINE_SERIALIZE_COMP, + ALGEBRA_ARK_BLS12_381_G1_PROJ_TO_AFFINE ), ( Structure::BLS12381G2, SerializationFormat::BLS12381G2Uncompressed, ark_bls12_381::G2Projective, serialize_uncompressed, - ALGEBRA_ARK_BLS12_381_G2_AFFINE_SERIALIZE_UNCOMP + ALGEBRA_ARK_BLS12_381_G2_AFFINE_SERIALIZE_UNCOMP, + ALGEBRA_ARK_BLS12_381_G2_PROJ_TO_AFFINE ), ( Structure::BLS12381G2, SerializationFormat::BLS12381G2Compressed, ark_bls12_381::G2Projective, serialize_compressed, - ALGEBRA_ARK_BLS12_381_G2_AFFINE_SERIALIZE_COMP + ALGEBRA_ARK_BLS12_381_G2_AFFINE_SERIALIZE_COMP, + ALGEBRA_ARK_BLS12_381_G2_PROJ_TO_AFFINE ), ( Structure::BN254G1, SerializationFormat::BN254G1Uncompressed, ark_bn254::G1Projective, serialize_uncompressed, - ALGEBRA_ARK_BN254_G1_AFFINE_SERIALIZE_UNCOMP + ALGEBRA_ARK_BN254_G1_AFFINE_SERIALIZE_UNCOMP, + ALGEBRA_ARK_BN254_G1_PROJ_TO_AFFINE ), ( Structure::BN254G1, SerializationFormat::BN254G1Compressed, ark_bn254::G1Projective, serialize_compressed, - ALGEBRA_ARK_BN254_G1_AFFINE_SERIALIZE_COMP + ALGEBRA_ARK_BN254_G1_AFFINE_SERIALIZE_COMP, + ALGEBRA_ARK_BN254_G1_PROJ_TO_AFFINE ), ( Structure::BN254G2, SerializationFormat::BN254G2Uncompressed, ark_bn254::G2Projective, serialize_uncompressed, - ALGEBRA_ARK_BN254_G2_AFFINE_SERIALIZE_UNCOMP + ALGEBRA_ARK_BN254_G2_AFFINE_SERIALIZE_UNCOMP, + ALGEBRA_ARK_BN254_G2_PROJ_TO_AFFINE ), ( Structure::BN254G2, SerializationFormat::BN254G2Compressed, ark_bn254::G2Projective, serialize_compressed, - ALGEBRA_ARK_BN254_G2_AFFINE_SERIALIZE_COMP + ALGEBRA_ARK_BN254_G2_AFFINE_SERIALIZE_COMP, + ALGEBRA_ARK_BN254_G2_PROJ_TO_AFFINE ), ] ) diff --git a/aptos-move/framework/src/natives/cryptography/bulletproofs.rs b/aptos-move/framework/src/natives/cryptography/bulletproofs.rs index 513da6fe504a9..6ce8140563dfb 100644 --- a/aptos-move/framework/src/natives/cryptography/bulletproofs.rs +++ b/aptos-move/framework/src/natives/cryptography/bulletproofs.rs @@ -2,15 +2,15 @@ // SPDX-License-Identifier: Apache-2.0 #[cfg(feature = "testing")] -use crate::natives::cryptography::ristretto255::pop_scalar_from_bytes; +use crate::natives::cryptography::ristretto255::{pop_scalar_from_bytes, pop_scalars_from_bytes}; use crate::natives::cryptography::ristretto255_point::{ get_point_handle, NativeRistrettoPointContext, }; use aptos_crypto::bulletproofs::MAX_RANGE_BITS; use aptos_gas_schedule::gas_params::natives::aptos_framework::*; use aptos_native_interface::{ - safely_pop_arg, RawSafeNative, SafeNativeBuilder, SafeNativeContext, SafeNativeError, - SafeNativeResult, + safely_pop_arg, safely_pop_vec_arg, RawSafeNative, SafeNativeBuilder, SafeNativeContext, + SafeNativeError, SafeNativeResult, }; use bulletproofs::{BulletproofGens, PedersenGens}; #[cfg(feature = "testing")] @@ -39,6 +39,14 @@ pub mod abort_codes { /// Abort code when the requested range is 
larger than the maximum supported one. /// NOTE: This must match the code in the Move implementation pub const NFE_RANGE_NOT_SUPPORTED: u64 = 0x01_0003; + + /// Abort code when the requested batch size is larger than the maximum supported one. + /// NOTE: This must match the code in the Move implementation + pub const NFE_BATCH_SIZE_NOT_SUPPORTED: u64 = 0x01_0004; + + /// Abort code when the vector lengths of values and blinding factors do not match. + /// NOTE: This must match the code in the Move implementation + pub const NFE_VECTOR_LENGTHS_MISMATCH: u64 = 0x01_0005; } /// The Bulletproofs library only seems to support proving [0, 2^{num_bits}) ranges where num_bits is @@ -47,9 +55,14 @@ fn is_supported_number_of_bits(num_bits: usize) -> bool { matches!(num_bits, 8 | 16 | 32 | 64) } -/// Public parameters of the Bulletproof range proof system +/// The Bulletproofs library only supports batch sizes of 1, 2, 4, 8, or 16. +fn is_supported_batch_size(batch_size: usize) -> bool { + matches!(batch_size, 1 | 2 | 4 | 8 | 16) +} + +/// Public parameters of the Bulletproof range proof system, for both individual and batch proving static BULLETPROOF_GENERATORS: Lazy = - Lazy::new(|| BulletproofGens::new(MAX_RANGE_BITS, 1)); + Lazy::new(|| BulletproofGens::new(MAX_RANGE_BITS, 16)); fn native_verify_range_proof( context: &mut SafeNativeContext, @@ -91,6 +104,54 @@ fn native_verify_range_proof( verify_range_proof(context, &comm_point, &pg, &proof_bytes[..], num_bits, dst) } +fn native_verify_batch_range_proof( + context: &mut SafeNativeContext, + _ty_args: Vec, + mut args: VecDeque, +) -> SafeNativeResult> { + debug_assert!(_ty_args.is_empty()); + debug_assert!(args.len() == 6); + + let dst = safely_pop_arg!(args, Vec); + let num_bits = safely_pop_arg!(args, u64) as usize; + let proof_bytes = safely_pop_arg!(args, Vec); + let rand_base_handle = get_point_handle(&safely_pop_arg!(args, StructRef))?; + let val_base_handle = get_point_handle(&safely_pop_arg!(args, StructRef))?; + let comm_bytes = safely_pop_vec_arg!(args, Vec); + + let comm_points = comm_bytes + .iter() + .map(|comm_bytes| CompressedRistretto::from_slice(comm_bytes.as_slice())) + .collect::>(); + + if !is_supported_number_of_bits(num_bits) { + return Err(SafeNativeError::Abort { + abort_code: abort_codes::NFE_RANGE_NOT_SUPPORTED, + }); + } + if !is_supported_batch_size(comm_points.len()) { + return Err(SafeNativeError::Abort { + abort_code: abort_codes::NFE_BATCH_SIZE_NOT_SUPPORTED, + }); + } + + let pg = { + let point_context = context.extensions().get::(); + let point_data = point_context.point_data.borrow_mut(); + + let rand_base = point_data.get_point(&rand_base_handle); + let val_base = point_data.get_point(&val_base_handle); + + // TODO(Perf): Is there a way to avoid this unnecessary cloning here? + PedersenGens { + B: *val_base, + B_blinding: *rand_base, + } + }; + + verify_batch_range_proof(context, &comm_points, &pg, &proof_bytes[..], num_bits, dst) +} + #[cfg(feature = "testing")] /// This is a test-only native that charges zero gas. It is only exported in testing mode. fn native_test_only_prove_range( @@ -157,6 +218,93 @@ fn native_test_only_prove_range( ]) } +#[cfg(feature = "testing")] +/// This is a test-only native that charges zero gas. It is only exported in testing mode. 
+fn native_test_only_batch_prove_range( + context: &mut SafeNativeContext, + _ty_args: Vec, + mut args: VecDeque, +) -> SafeNativeResult> { + debug_assert!(_ty_args.is_empty()); + debug_assert!(args.len() == 6); + + let rand_base_handle = get_point_handle(&safely_pop_arg!(args, StructRef))?; + let val_base_handle = get_point_handle(&safely_pop_arg!(args, StructRef))?; + let dst = safely_pop_arg!(args, Vec); + let num_bits = safely_pop_arg!(args, u64) as usize; + let v_blindings = pop_scalars_from_bytes(&mut args)?; + let vs = pop_scalars_from_bytes(&mut args)?; + + if !is_supported_number_of_bits(num_bits) { + return Err(SafeNativeError::Abort { + abort_code: abort_codes::NFE_RANGE_NOT_SUPPORTED, + }); + } + if !is_supported_batch_size(vs.len()) { + return Err(SafeNativeError::Abort { + abort_code: abort_codes::NFE_BATCH_SIZE_NOT_SUPPORTED, + }); + } + if vs.len() != v_blindings.len() { + return Err(SafeNativeError::Abort { + abort_code: abort_codes::NFE_VECTOR_LENGTHS_MISMATCH, + }); + } + + // Make sure only the first 64 bits are set for each Scalar. + if !vs + .iter() + .all(|v| v.as_bytes()[8..].iter().all(|&byte| byte == 0u8)) + { + return Err(SafeNativeError::Abort { + abort_code: abort_codes::NFE_VALUE_OUTSIDE_RANGE, + }); + } + + // Convert each Scalar to u64. + let vs = vs + .iter() + .map(|v| LittleEndian::read_u64(v.as_bytes())) + .collect::>(); + + let mut t = Transcript::new(dst.as_slice()); + + let pg = { + let point_context = context.extensions().get::(); + let point_data = point_context.point_data.borrow_mut(); + + let rand_base = point_data.get_point(&rand_base_handle); + let val_base = point_data.get_point(&val_base_handle); + + // TODO(Perf): Is there a way to avoid this unnecessary cloning here? + PedersenGens { + B: *val_base, + B_blinding: *rand_base, + } + }; + + // Construct a range proof. + let (proof, commitments) = bulletproofs::RangeProof::prove_multiple( + &BULLETPROOF_GENERATORS, + &pg, + &mut t, + &vs, + &v_blindings, + num_bits, + ) + .expect("Bulletproofs prover failed unexpectedly"); + + Ok(smallvec![ + Value::vector_u8(proof.to_bytes()), + Value::vector_for_testing_only( + commitments + .iter() + .map(|commitment| Value::vector_u8(commitment.as_bytes().to_vec())) + .collect::>() + ) + ]) +} + /*************************************************************************************************** * module * @@ -204,21 +352,100 @@ fn verify_range_proof( Ok(smallvec![Value::bool(success)]) } +/// Helper function to gas meter and verify a batch Bulletproof range proof for Pedersen +/// commitments with `pc_gens` as their commitment keys. +fn verify_batch_range_proof( + context: &mut SafeNativeContext, + comm_points: &[CompressedRistretto], + pc_gens: &PedersenGens, + proof_bytes: &[u8], + bit_length: usize, + dst: Vec, +) -> SafeNativeResult> { + charge_gas(context, comm_points.len(), bit_length)?; + + let range_proof = match bulletproofs::RangeProof::from_bytes(proof_bytes) { + Ok(proof) => proof, + Err(_) => { + return Err(SafeNativeError::Abort { + abort_code: abort_codes::NFE_DESERIALIZE_RANGE_PROOF, + }) + }, + }; + + let mut ver_trans = Transcript::new(dst.as_slice()); + + let success = range_proof + .verify_multiple( + &BULLETPROOF_GENERATORS, + pc_gens, + &mut ver_trans, + comm_points, + bit_length, + ) + .is_ok(); + + Ok(smallvec![Value::bool(success)]) +} + +/// Charges base gas fee for verifying and deserializing a Bulletproof range proof. 
+fn charge_gas( + context: &mut SafeNativeContext, + batch_size: usize, + bit_length: usize, +) -> SafeNativeResult<()> { + match (batch_size, bit_length) { + (1, 8) => context.charge(BULLETPROOFS_VERIFY_BASE_BATCH_1_BITS_8), + (1, 16) => context.charge(BULLETPROOFS_VERIFY_BASE_BATCH_1_BITS_16), + (1, 32) => context.charge(BULLETPROOFS_VERIFY_BASE_BATCH_1_BITS_32), + (1, 64) => context.charge(BULLETPROOFS_VERIFY_BASE_BATCH_1_BITS_64), + (2, 8) => context.charge(BULLETPROOFS_VERIFY_BASE_BATCH_2_BITS_8), + (2, 16) => context.charge(BULLETPROOFS_VERIFY_BASE_BATCH_2_BITS_16), + (2, 32) => context.charge(BULLETPROOFS_VERIFY_BASE_BATCH_2_BITS_32), + (2, 64) => context.charge(BULLETPROOFS_VERIFY_BASE_BATCH_2_BITS_64), + (4, 8) => context.charge(BULLETPROOFS_VERIFY_BASE_BATCH_4_BITS_8), + (4, 16) => context.charge(BULLETPROOFS_VERIFY_BASE_BATCH_4_BITS_16), + (4, 32) => context.charge(BULLETPROOFS_VERIFY_BASE_BATCH_4_BITS_32), + (4, 64) => context.charge(BULLETPROOFS_VERIFY_BASE_BATCH_4_BITS_64), + (8, 8) => context.charge(BULLETPROOFS_VERIFY_BASE_BATCH_8_BITS_8), + (8, 16) => context.charge(BULLETPROOFS_VERIFY_BASE_BATCH_8_BITS_16), + (8, 32) => context.charge(BULLETPROOFS_VERIFY_BASE_BATCH_8_BITS_32), + (8, 64) => context.charge(BULLETPROOFS_VERIFY_BASE_BATCH_8_BITS_64), + (16, 8) => context.charge(BULLETPROOFS_VERIFY_BASE_BATCH_16_BITS_8), + (16, 16) => context.charge(BULLETPROOFS_VERIFY_BASE_BATCH_16_BITS_16), + (16, 32) => context.charge(BULLETPROOFS_VERIFY_BASE_BATCH_16_BITS_32), + (16, 64) => context.charge(BULLETPROOFS_VERIFY_BASE_BATCH_16_BITS_64), + _ => unreachable!(), + } +} + pub fn make_all( builder: &SafeNativeBuilder, ) -> impl Iterator + '_ { let mut natives = vec![]; #[cfg(feature = "testing")] - natives.extend([( - "prove_range_internal", - native_test_only_prove_range as RawSafeNative, - )]); - - natives.extend([( - "verify_range_proof_internal", - native_verify_range_proof as RawSafeNative, - )]); + natives.extend([ + ( + "prove_range_internal", + native_test_only_prove_range as RawSafeNative, + ), + ( + "prove_batch_range_internal", + native_test_only_batch_prove_range, + ), + ]); + + natives.extend([ + ( + "verify_range_proof_internal", + native_verify_range_proof as RawSafeNative, + ), + ( + "verify_batch_range_proof_internal", + native_verify_batch_range_proof, + ), + ]); builder.make_named_natives(natives) } diff --git a/aptos-move/framework/src/natives/cryptography/multi_ed25519.rs b/aptos-move/framework/src/natives/cryptography/multi_ed25519.rs index ad80f69e16ff4..e3de0f4e7b074 100644 --- a/aptos-move/framework/src/natives/cryptography/multi_ed25519.rs +++ b/aptos-move/framework/src/natives/cryptography/multi_ed25519.rs @@ -99,7 +99,7 @@ fn num_valid_subpks( Ok(slice) => { if CompressedEdwardsY(slice) .decompress() - .map_or(false, |point| !point.is_small_order()) + .is_some_and(|point| !point.is_small_order()) { num_valid += 1; } else { diff --git a/aptos-move/framework/src/natives/cryptography/ristretto255.rs b/aptos-move/framework/src/natives/cryptography/ristretto255.rs index 93d3d888843af..88be28e232867 100644 --- a/aptos-move/framework/src/natives/cryptography/ristretto255.rs +++ b/aptos-move/framework/src/natives/cryptography/ristretto255.rs @@ -5,8 +5,8 @@ use crate::natives::cryptography::{ristretto255_point, ristretto255_scalar}; use aptos_gas_algebra::GasExpression; use aptos_gas_schedule::{gas_params::natives::aptos_framework::*, NativeGasParameters}; use aptos_native_interface::{ - safely_assert_eq, safely_pop_arg, RawSafeNative, SafeNativeBuilder, 
SafeNativeError, - SafeNativeResult, + safely_assert_eq, safely_pop_arg, safely_pop_vec_arg, RawSafeNative, SafeNativeBuilder, + SafeNativeError, SafeNativeResult, }; use aptos_types::vm_status::StatusCode; use curve25519_dalek::scalar::Scalar; @@ -174,6 +174,16 @@ pub fn pop_scalar_from_bytes(arguments: &mut VecDeque) -> SafeNativeResul scalar_from_valid_bytes(bytes) } +/// Pops a Scalars off the argument stack when the argument was a `vector>`. +pub fn pop_scalars_from_bytes(arguments: &mut VecDeque) -> SafeNativeResult> { + let bytes = safely_pop_vec_arg!(arguments, Vec); + + bytes + .into_iter() + .map(scalar_from_valid_bytes) + .collect::>>() +} + /// The 'data' field inside a Move Scalar struct is at index 0. const DATA_FIELD_INDEX: usize = 0; diff --git a/aptos-move/framework/src/natives/dispatchable_fungible_asset.rs b/aptos-move/framework/src/natives/dispatchable_fungible_asset.rs index d7d0d76a6784d..1bbd16f456023 100644 --- a/aptos-move/framework/src/natives/dispatchable_fungible_asset.rs +++ b/aptos-move/framework/src/natives/dispatchable_fungible_asset.rs @@ -1,7 +1,8 @@ -use super::function_info::extract_function_info; -use aptos_gas_schedule::gas_params::natives::aptos_framework::DISPATCHABLE_FUNGIBLE_ASSET_DISPATCH_BASE; // Copyright © Aptos Foundation // SPDX-License-Identifier: Apache-2.0 + +use super::function_info::extract_function_info; +use aptos_gas_schedule::gas_params::natives::aptos_framework::DISPATCHABLE_FUNGIBLE_ASSET_DISPATCH_BASE; use aptos_native_interface::{ RawSafeNative, SafeNativeBuilder, SafeNativeContext, SafeNativeError, SafeNativeResult, }; @@ -11,7 +12,7 @@ use smallvec::SmallVec; use std::collections::VecDeque; /*************************************************************************************************** - * native fun dispatchable_withdraw / dispatchable_deposit / dispatchable_derived_balance + * native fun dispatchable_withdraw / dispatchable_deposit / dispatchable_derived_balance / dispatchable_derived_supply * * Directs control flow based on the last argument. We use the same native function implementation * for all dispatching native. @@ -24,14 +25,24 @@ pub(crate) fn native_dispatch( mut arguments: VecDeque, ) -> SafeNativeResult> { let (module_name, func_name) = extract_function_info(&mut arguments)?; + // Check if the module is already properly charged in this transaction. - if !context - .traversal_context() - .visited - .contains_key(&(module_name.address(), module_name.name())) - { - return Err(SafeNativeError::Abort { abort_code: 4 }); - } + let check_visited = |a, n| { + let special_addresses_considered_visited = + context.get_feature_flags().is_account_abstraction_enabled() + || context + .get_feature_flags() + .is_derivable_account_abstraction_enabled(); + if special_addresses_considered_visited { + context + .traversal_context() + .check_is_special_or_visited(a, n) + } else { + context.traversal_context().legacy_check_visited(a, n) + } + }; + check_visited(module_name.address(), module_name.name()) + .map_err(|_| SafeNativeError::Abort { abort_code: 4 })?; // Use Error to instruct the VM to perform a function call dispatch. 
Err(SafeNativeError::FunctionDispatch { @@ -54,6 +65,7 @@ pub fn make_all( ("dispatchable_withdraw", native_dispatch as RawSafeNative), ("dispatchable_deposit", native_dispatch), ("dispatchable_derived_balance", native_dispatch), + ("dispatchable_derived_supply", native_dispatch), ]; builder.make_named_natives(natives) diff --git a/aptos-move/framework/src/natives/event.rs b/aptos-move/framework/src/natives/event.rs index 01de485b3f4fc..0aeb399ff439d 100644 --- a/aptos-move/framework/src/natives/event.rs +++ b/aptos-move/framework/src/natives/event.rs @@ -16,16 +16,16 @@ use move_binary_format::errors::PartialVMError; use move_core_types::{language_storage::TypeTag, value::MoveTypeLayout, vm_status::StatusCode}; use move_vm_runtime::native_functions::NativeFunction; #[cfg(feature = "testing")] -use move_vm_types::value_serde::deserialize_and_allow_delayed_values; -#[cfg(feature = "testing")] use move_vm_types::values::{Reference, Struct, StructRef}; use move_vm_types::{ - loaded_data::runtime_types::Type, value_serde::serialize_and_allow_delayed_values, - values::Value, + loaded_data::runtime_types::Type, value_serde::ValueSerDeContext, values::Value, }; use smallvec::{smallvec, SmallVec}; use std::collections::VecDeque; +/// Error code from `0x1::events.move`, returned when event creation fails. +pub const ECANNOT_CREATE_EVENT: u64 = 1; + /// Cached emitted module events. #[derive(Default, Tid)] pub struct NativeEventContext { @@ -92,20 +92,28 @@ fn native_write_to_event_store( let ty_tag = context.type_to_type_tag(&ty)?; let (layout, has_aggregator_lifting) = context.type_to_type_layout_with_identifier_mappings(&ty)?; - let blob = serialize_and_allow_delayed_values(&msg, &layout)?.ok_or_else(|| { - SafeNativeError::InvariantViolation(PartialVMError::new( - StatusCode::UNKNOWN_INVARIANT_VIOLATION_ERROR, - )) - })?; + + let function_value_extension = context.function_value_extension(); + let blob = ValueSerDeContext::new() + .with_delayed_fields_serde() + .with_func_args_deserialization(&function_value_extension) + .serialize(&msg, &layout)? 
+ .ok_or_else(|| { + SafeNativeError::InvariantViolation(PartialVMError::new( + StatusCode::UNKNOWN_INVARIANT_VIOLATION_ERROR, + )) + })?; let key = bcs::from_bytes(guid.as_slice()).map_err(|_| { SafeNativeError::InvariantViolation(PartialVMError::new(StatusCode::EVENT_KEY_MISMATCH)) })?; let ctx = context.extensions_mut().get_mut::(); - ctx.events.push(( - ContractEvent::new_v1(key, seq_num, ty_tag, blob), - has_aggregator_lifting.then_some(layout), - )); + let event = + ContractEvent::new_v1(key, seq_num, ty_tag, blob).map_err(|_| SafeNativeError::Abort { + abort_code: ECANNOT_CREATE_EVENT, + })?; + ctx.events + .push((event, has_aggregator_lifting.then_some(layout))); Ok(smallvec![]) } @@ -147,16 +155,20 @@ fn native_emitted_events_by_handle( let key = EventKey::new(creation_num, addr); let ty_tag = context.type_to_type_tag(&ty)?; let ty_layout = context.type_to_type_layout(&ty)?; - let ctx = context.extensions_mut().get_mut::(); + let ctx = context.extensions().get::(); let events = ctx .emitted_v1_events(&key, &ty_tag) .into_iter() .map(|blob| { - Value::simple_deserialize(blob, &ty_layout).ok_or_else(|| { - SafeNativeError::InvariantViolation(PartialVMError::new( - StatusCode::UNKNOWN_INVARIANT_VIOLATION_ERROR, - )) - }) + let function_value_extension = context.function_value_extension(); + ValueSerDeContext::new() + .with_func_args_deserialization(&function_value_extension) + .deserialize(blob, &ty_layout) + .ok_or_else(|| { + SafeNativeError::InvariantViolation(PartialVMError::new( + StatusCode::UNKNOWN_INVARIANT_VIOLATION_ERROR, + )) + }) }) .collect::>>()?; Ok(smallvec![Value::vector_for_testing_only(events)]) @@ -175,16 +187,22 @@ fn native_emitted_events( let ty_tag = context.type_to_type_tag(&ty)?; let ty_layout = context.type_to_type_layout(&ty)?; - let ctx = context.extensions_mut().get_mut::(); + let ctx = context.extensions().get::(); + let events = ctx .emitted_v2_events(&ty_tag) .into_iter() .map(|blob| { - deserialize_and_allow_delayed_values(blob, &ty_layout).ok_or_else(|| { - SafeNativeError::InvariantViolation(PartialVMError::new( - StatusCode::VALUE_DESERIALIZATION_ERROR, - )) - }) + let function_value_extension = context.function_value_extension(); + ValueSerDeContext::new() + .with_func_args_deserialization(&function_value_extension) + .with_delayed_fields_serde() + .deserialize(blob, &ty_layout) + .ok_or_else(|| { + SafeNativeError::InvariantViolation(PartialVMError::new( + StatusCode::VALUE_DESERIALIZATION_ERROR, + )) + }) }) .collect::>>()?; Ok(smallvec![Value::vector_for_testing_only(events)]) @@ -210,41 +228,56 @@ fn native_write_module_event_to_store( let type_tag = context.type_to_type_tag(&ty)?; // Additional runtime check for module call. - if let (Some(id), _, _) = context - .stack_frames(1) + let stack_frames = context.stack_frames(1); + let id = stack_frames .stack_trace() .first() + .map(|(caller, _, _)| caller) .ok_or_else(|| { - SafeNativeError::InvariantViolation(PartialVMError::new( - StatusCode::UNKNOWN_INVARIANT_VIOLATION_ERROR, - )) + let err = PartialVMError::new_invariant_violation( + "Caller frame for 0x1::emit::event is not found", + ); + SafeNativeError::InvariantViolation(err) })? - { - if let TypeTag::Struct(ref struct_tag) = type_tag { - if id != &struct_tag.module_id() { - return Err(SafeNativeError::InvariantViolation(PartialVMError::new( - StatusCode::INTERNAL_TYPE_ERROR, - ))); - } - } else { + .as_ref() + .ok_or_else(|| { + // If module is not known, this call must come from the script, which is not allowed. 
+ let err = PartialVMError::new_invariant_violation("Scripts cannot emit events"); + SafeNativeError::InvariantViolation(err) + })?; + + if let TypeTag::Struct(ref struct_tag) = type_tag { + if id != &struct_tag.module_id() { return Err(SafeNativeError::InvariantViolation(PartialVMError::new( StatusCode::INTERNAL_TYPE_ERROR, ))); } + } else { + return Err(SafeNativeError::InvariantViolation(PartialVMError::new( + StatusCode::INTERNAL_TYPE_ERROR, + ))); } + let (layout, has_identifier_mappings) = context.type_to_type_layout_with_identifier_mappings(&ty)?; - let blob = serialize_and_allow_delayed_values(&msg, &layout)?.ok_or_else(|| { - SafeNativeError::InvariantViolation( - PartialVMError::new(StatusCode::UNKNOWN_INVARIANT_VIOLATION_ERROR) - .with_message("Event serialization failure".to_string()), - ) - })?; + + let function_value_extension = context.function_value_extension(); + let blob = ValueSerDeContext::new() + .with_delayed_fields_serde() + .with_func_args_deserialization(&function_value_extension) + .serialize(&msg, &layout)? + .ok_or_else(|| { + SafeNativeError::InvariantViolation(PartialVMError::new_invariant_violation( + "Event serialization failure", + )) + })?; + let ctx = context.extensions_mut().get_mut::(); - ctx.events.push(( - ContractEvent::new_v2(type_tag, blob), - has_identifier_mappings.then_some(layout), - )); + let event = ContractEvent::new_v2(type_tag, blob).map_err(|_| SafeNativeError::Abort { + abort_code: ECANNOT_CREATE_EVENT, + })?; + ctx.events + .push((event, has_identifier_mappings.then_some(layout))); Ok(smallvec![]) } diff --git a/aptos-move/framework/src/natives/function_info.rs b/aptos-move/framework/src/natives/function_info.rs index 80a953af031dd..c342cf3c420f6 100644 --- a/aptos-move/framework/src/natives/function_info.rs +++ b/aptos-move/framework/src/natives/function_info.rs @@ -1,5 +1,6 @@ // Copyright © Aptos Foundation // SPDX-License-Identifier: Apache-2.0 + use aptos_gas_schedule::gas_params::natives::aptos_framework::*; use aptos_native_interface::{ safely_pop_arg, RawSafeNative, SafeNativeBuilder, SafeNativeContext, SafeNativeError, @@ -18,7 +19,7 @@ use move_vm_types::{ use smallvec::{smallvec, SmallVec}; use std::collections::VecDeque; -// Extract Identifer from a move value of type &String +// Extract Identifier from a move value of type &String fn identifier_from_ref(v: Value) -> SafeNativeResult { let bytes = v .value_as::() @@ -83,13 +84,24 @@ fn native_check_dispatch_type_compatibility_impl( let (rhs, rhs_id) = { let (module, func) = extract_function_info(&mut arguments)?; - if !context - .traversal_context() - .visited - .contains_key(&(module.address(), module.name())) - { - return Err(SafeNativeError::Abort { abort_code: 2 }); - } + + let check_visited = |a, n| { + let special_addresses_considered_visited = + context.get_feature_flags().is_account_abstraction_enabled() + || context + .get_feature_flags() + .is_derivable_account_abstraction_enabled(); + if special_addresses_considered_visited { + context + .traversal_context() + .check_is_special_or_visited(a, n) + } else { + context.traversal_context().legacy_check_visited(a, n) + } + }; + check_visited(module.address(), module.name()) + .map_err(|_| SafeNativeError::Abort { abort_code: 2 })?; + ( context .load_function(&module, &func) @@ -115,7 +127,8 @@ fn native_check_dispatch_type_compatibility_impl( rhs.ty_param_abilities() == lhs.ty_param_abilities() && rhs.return_tys() == lhs.return_tys() && &lhs.param_tys()[0..lhs.param_count() - 1] == rhs.param_tys() - && 
!rhs.is_friend_or_private() + && rhs.is_public() + && !rhs.is_native() && lhs_id != rhs_id )]) } diff --git a/aptos-move/framework/src/natives/mod.rs b/aptos-move/framework/src/natives/mod.rs index bec070b996540..4fc3c8ec76e07 100644 --- a/aptos-move/framework/src/natives/mod.rs +++ b/aptos-move/framework/src/natives/mod.rs @@ -3,6 +3,8 @@ // SPDX-License-Identifier: Apache-2.0 pub mod account; + +pub mod account_abstraction; pub mod aggregator_natives; pub mod code; pub mod consensus_config; @@ -15,6 +17,7 @@ pub mod function_info; pub mod hash; pub mod object; pub mod object_code_deployment; +pub mod permissioned_signer; pub mod randomness; pub mod state_storage; pub mod string_utils; @@ -24,7 +27,7 @@ pub mod util; use crate::natives::cryptography::multi_ed25519; use aggregator_natives::{aggregator, aggregator_factory, aggregator_v2}; -use aptos_native_interface::SafeNativeBuilder; +use aptos_native_interface::{RawSafeNative, SafeNativeBuilder}; use cryptography::ed25519; use move_core_types::account_address::AccountAddress; use move_vm_runtime::native_functions::{make_table_from_iter, NativeFunctionTable}; @@ -39,6 +42,7 @@ pub mod status { pub fn all_natives( framework_addr: AccountAddress, builder: &SafeNativeBuilder, + inject_create_signer_for_gov_sim: bool, ) -> NativeFunctionTable { let mut natives = vec![]; @@ -90,6 +94,24 @@ pub fn all_natives( "dispatchable_fungible_asset", dispatchable_fungible_asset::make_all(builder) ); + add_natives_from_module!( + "permissioned_signer", + permissioned_signer::make_all(builder) + ); + add_natives_from_module!( + "account_abstraction", + account_abstraction::make_all(builder) + ); + + if inject_create_signer_for_gov_sim { + add_natives_from_module!( + "aptos_governance", + builder.make_named_natives([( + "create_signer", + create_signer::native_create_signer as RawSafeNative + )]) + ); + } make_table_from_iter(framework_addr, natives) } diff --git a/aptos-move/framework/src/natives/permissioned_signer.rs b/aptos-move/framework/src/natives/permissioned_signer.rs new file mode 100644 index 0000000000000..658bc7a8778ac --- /dev/null +++ b/aptos-move/framework/src/natives/permissioned_signer.rs @@ -0,0 +1,178 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 +use aptos_gas_schedule::gas_params::natives::{ + aptos_framework::{ + IS_PERMISSIONED_SIGNER_BASE, PERMISSION_ADDRESS_BASE, SIGNER_FROM_PERMISSIONED_HANDLE_BASE, + }, + move_stdlib::SIGNER_BORROW_ADDRESS_BASE, +}; +use aptos_native_interface::{ + safely_pop_arg, RawSafeNative, SafeNativeBuilder, SafeNativeContext, SafeNativeError, + SafeNativeResult, +}; +use move_core_types::account_address::AccountAddress; +use move_vm_runtime::native_functions::NativeFunction; +use move_vm_types::{ + loaded_data::runtime_types::Type, + values::{SignerRef, Value}, +}; +use smallvec::{smallvec, SmallVec}; +use std::collections::VecDeque; + +const EPERMISSION_SIGNER_DISABLED: u64 = 9; + +/*************************************************************************************************** + * native fun is_permissioned_signer_impl + * + * Returns true if the signer passed in is a permissioned signer + * gas cost: base_cost + * + **************************************************************************************************/ +fn native_is_permissioned_signer_impl( + context: &mut SafeNativeContext, + _ty_args: Vec, + mut arguments: VecDeque, +) -> SafeNativeResult> { + debug_assert!(arguments.len() == 1); + + if !context + .get_feature_flags() + 
.is_enabled(aptos_types::on_chain_config::FeatureFlag::PERMISSIONED_SIGNER) + { + return SafeNativeResult::Err(SafeNativeError::Abort { + abort_code: EPERMISSION_SIGNER_DISABLED, + }); + } + + let signer = safely_pop_arg!(arguments, SignerRef); + + context.charge(IS_PERMISSIONED_SIGNER_BASE)?; + let result = signer.is_permissioned()?; + + Ok(smallvec![Value::bool(result)]) +} + +/*************************************************************************************************** + * native fun permission_address + * + * Returns the permission storage address if the signer passed in is a permissioned signer + * gas cost: base_cost + * + **************************************************************************************************/ +fn native_permission_address( + context: &mut SafeNativeContext, + _ty_args: Vec, + mut args: VecDeque, +) -> SafeNativeResult> { + debug_assert!(args.len() == 1); + + if !context + .get_feature_flags() + .is_enabled(aptos_types::on_chain_config::FeatureFlag::PERMISSIONED_SIGNER) + { + return SafeNativeResult::Err(SafeNativeError::Abort { + abort_code: EPERMISSION_SIGNER_DISABLED, + }); + } + + let signer = safely_pop_arg!(args, SignerRef); + + context.charge(PERMISSION_ADDRESS_BASE)?; + if !signer.is_permissioned()? { + return Err(SafeNativeError::Abort { abort_code: 3 }); + } + + Ok(smallvec![signer.permission_address()?]) +} + +/*************************************************************************************************** + * native fun signer_from_permissioned_handle_impl + * + * Returns the permission signer from a master signer. + * gas cost: base_cost + * + **************************************************************************************************/ +fn native_signer_from_permissioned( + context: &mut SafeNativeContext, + _ty_args: Vec, + mut arguments: VecDeque, +) -> SafeNativeResult> { + debug_assert!(arguments.len() == 2); + + if !context + .get_feature_flags() + .is_enabled(aptos_types::on_chain_config::FeatureFlag::PERMISSIONED_SIGNER) + { + return SafeNativeResult::Err(SafeNativeError::Abort { + abort_code: EPERMISSION_SIGNER_DISABLED, + }); + } + + let permission_addr = safely_pop_arg!(arguments, AccountAddress); + let master_addr = safely_pop_arg!(arguments, AccountAddress); + context.charge(SIGNER_FROM_PERMISSIONED_HANDLE_BASE)?; + + Ok(smallvec![Value::permissioned_signer( + master_addr, + permission_addr + )]) +} + +/*************************************************************************************************** + * native fun borrow_address + * + * gas cost: base_cost + * + **************************************************************************************************/ +#[inline] +fn native_borrow_address( + context: &mut SafeNativeContext, + _ty_args: Vec, + mut arguments: VecDeque, +) -> SafeNativeResult> { + debug_assert!(_ty_args.is_empty()); + debug_assert!(arguments.len() == 1); + + let signer_reference = safely_pop_arg!(arguments, SignerRef); + + if !context + .get_feature_flags() + .is_enabled(aptos_types::on_chain_config::FeatureFlag::PERMISSIONED_SIGNER) + && signer_reference.is_permissioned()? 
+ { + return SafeNativeResult::Err(SafeNativeError::Abort { + abort_code: EPERMISSION_SIGNER_DISABLED, + }); + } + + context.charge(SIGNER_BORROW_ADDRESS_BASE)?; + + Ok(smallvec![signer_reference.borrow_signer()?]) +} + +/*************************************************************************************************** + * module + * + **************************************************************************************************/ +pub fn make_all( + builder: &SafeNativeBuilder, +) -> impl Iterator + '_ { + let natives = [ + ( + "is_permissioned_signer_impl", + native_is_permissioned_signer_impl as RawSafeNative, + ), + ( + "is_permissioned_signer", + native_is_permissioned_signer_impl as RawSafeNative, + ), + ("permission_address", native_permission_address), + ( + "signer_from_permissioned_handle_impl", + native_signer_from_permissioned, + ), + ("borrow_address", native_borrow_address), + ]; + + builder.make_named_natives(natives) +} diff --git a/aptos-move/framework/src/natives/randomness.rs b/aptos-move/framework/src/natives/randomness.rs index a608dfb0441fb..20b5964aca620 100644 --- a/aptos-move/framework/src/natives/randomness.rs +++ b/aptos-move/framework/src/natives/randomness.rs @@ -1,6 +1,10 @@ // Copyright © Aptos Foundation // SPDX-License-Identifier: Apache-2.0 +use aptos_gas_schedule::{ + gas_feature_versions::RELEASE_V1_23, + gas_params::natives::aptos_framework::RANDOMNESS_FETCH_AND_INC_COUNTER, +}; use aptos_native_interface::{ RawSafeNative, SafeNativeBuilder, SafeNativeContext, SafeNativeError, SafeNativeResult, }; @@ -55,6 +59,10 @@ pub fn fetch_and_increment_txn_counter( _ty_args: Vec, _args: VecDeque, ) -> SafeNativeResult> { + if context.gas_feature_version() >= RELEASE_V1_23 { + context.charge(RANDOMNESS_FETCH_AND_INC_COUNTER)?; + } + let ctx = context.extensions_mut().get_mut::(); if !ctx.is_unbiasable() { return Err(SafeNativeError::Abort { @@ -62,7 +70,6 @@ pub fn fetch_and_increment_txn_counter( }); } - // TODO: charge gas? let ret = ctx.txn_local_state.to_vec(); ctx.increment(); Ok(smallvec![Value::vector_u8(ret)]) diff --git a/aptos-move/framework/src/natives/state_storage.rs b/aptos-move/framework/src/natives/state_storage.rs index 6f65cf3bc70c3..11f48877255d8 100644 --- a/aptos-move/framework/src/natives/state_storage.rs +++ b/aptos-move/framework/src/natives/state_storage.rs @@ -5,7 +5,7 @@ use aptos_gas_schedule::gas_params::natives::aptos_framework::*; use aptos_native_interface::{ RawSafeNative, SafeNativeBuilder, SafeNativeContext, SafeNativeResult, }; -use aptos_types::vm_status::StatusCode; +use aptos_types::{state_store::state_key::StateKey, vm_status::StatusCode}; use aptos_vm_types::resolver::StateStorageView; use better_any::{Tid, TidAble}; use move_binary_format::errors::PartialVMError; @@ -20,11 +20,11 @@ use std::collections::VecDeque; /// Exposes the ability to query state storage utilization info to native functions. 
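// Illustrative sketch (not part of this patch) of how the permissioned-signer
// natives registered above fit together: a permissioned signer value carries both
// the master account address and a permission-storage address. The addresses below
// are made up; the constructors come from this change.
fn permissioned_signer_sketch() {
    let master_addr = AccountAddress::ONE;
    let permission_addr = AccountAddress::TWO;
    // What `signer_from_permissioned_handle_impl` returns:
    let signer = Value::permissioned_signer(master_addr, permission_addr);
    // `is_permissioned_signer_impl` would report true for this value, and
    // `permission_address` would return `permission_addr`, while the signer still
    // stands for `master_addr`.
    let _ = signer;
}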
#[derive(Tid)] pub struct NativeStateStorageContext<'a> { - resolver: &'a dyn StateStorageView, + resolver: &'a dyn StateStorageView, } impl<'a> NativeStateStorageContext<'a> { - pub fn new(resolver: &'a dyn StateStorageView) -> Self { + pub fn new(resolver: &'a dyn StateStorageView) -> Self { Self { resolver } } } diff --git a/aptos-move/framework/src/natives/string_utils.rs b/aptos-move/framework/src/natives/string_utils.rs index b724fe2c476d8..394e2a6eb7dc2 100644 --- a/aptos-move/framework/src/natives/string_utils.rs +++ b/aptos-move/framework/src/natives/string_utils.rs @@ -13,12 +13,13 @@ use move_core_types::{ account_address::AccountAddress, language_storage::TypeTag, u256, - value::{MoveFieldLayout, MoveStructLayout, MoveTypeLayout}, + value::{MoveFieldLayout, MoveStructLayout, MoveTypeLayout, MASTER_ADDRESS_FIELD_OFFSET}, }; use move_vm_runtime::native_functions::NativeFunction; use move_vm_types::{ loaded_data::runtime_types::Type, - values::{Reference, Struct, Value, Vector, VectorRef}, + value_serde::FunctionValueExtension, + values::{Closure, Reference, Struct, Value, Vector, VectorRef}, }; use smallvec::{smallvec, SmallVec}; use std::{collections::VecDeque, fmt::Write, ops::Deref}; @@ -28,8 +29,8 @@ const EARGS_MISMATCH: u64 = 1; const EINVALID_FORMAT: u64 = 2; const EUNABLE_TO_FORMAT_DELAYED_FIELD: u64 = 3; -struct FormatContext<'a, 'b, 'c, 'd, 'e> { - context: &'d mut SafeNativeContext<'a, 'b, 'c, 'e>, +struct FormatContext<'a, 'b, 'c, 'd> { + context: &'d mut SafeNativeContext<'a, 'b, 'c>, should_charge_gas: bool, max_depth: usize, max_len: usize, @@ -188,8 +189,11 @@ fn native_format_impl( let addr = if fix_enabled { val.value_as::()? .unpack()? - .next() - .unwrap() + // The second field of a signer is always the master address regardless of which variants. + .nth(MASTER_ADDRESS_FIELD_OFFSET) + .ok_or_else(|| SafeNativeError::Abort { + abort_code: EINVALID_FORMAT, + })? .value_as::()? } else { val.value_as::()? 
@@ -209,6 +213,13 @@ fn native_format_impl( MoveTypeLayout::Vector(ty) => { if let MoveTypeLayout::U8 = ty.as_ref() { let bytes = val.value_as::>()?; + if context.context.timed_feature_enabled( + aptos_types::on_chain_config::TimedFeatureFlag::ChargeBytesForPrints, + ) { + context + .context + .charge(STRING_UTILS_PER_BYTE * NumBytes::new(bytes.len() as u64))?; + } write!(out, "0x{}", hex::encode(bytes)).unwrap(); return Ok(()); } @@ -268,7 +279,7 @@ fn native_format_impl( return Ok(()); } if context.type_tag { - write!(out, "{} {{", TypeTag::from(type_.clone())).unwrap(); + write!(out, "{} {{", type_.to_canonical_string()).unwrap(); } else { write!(out, "{} {{", type_.name.as_str()).unwrap(); }; @@ -308,9 +319,67 @@ fn native_format_impl( )?; out.push('}'); }, + MoveTypeLayout::Struct(MoveStructLayout::RuntimeVariants(variants)) => { + let struct_value = val.value_as::()?; + let (tag, elems) = struct_value.unpack_with_tag()?; + if (tag as usize) >= variants.len() { + return Err(SafeNativeError::Abort { + abort_code: EINVALID_FORMAT, + }); + } + out.push_str(&format!("#{}{{", tag)); + format_vector( + context, + variants[tag as usize].iter(), + elems.collect(), + depth, + !context.single_line, + out, + )?; + out.push('}'); + }, + MoveTypeLayout::Struct(MoveStructLayout::WithVariants(variants)) => { + let struct_value = val.value_as::()?; + let (tag, elems) = struct_value.unpack_with_tag()?; + if (tag as usize) >= variants.len() { + return Err(SafeNativeError::Abort { + abort_code: EINVALID_FORMAT, + }); + } + let variant = &variants[tag as usize]; + out.push_str(&format!("{}{{", variant.name)); + format_vector( + context, + variant.fields.iter(), + elems.collect(), + depth, + !context.single_line, + out, + )?; + out.push('}'); + }, + MoveTypeLayout::Function => { + // Notice that we print the undecorated value representation, + // avoiding potential loading of the function to get full + // decorated type information. + let (fun, args) = val.value_as::()?.unpack(); + let data = context + .context + .function_value_extension() + .get_serialization_data(fun.as_ref())?; + out.push_str(&fun.to_canonical_string()); + format_vector( + context, + data.captured_layouts.iter(), + args.collect(), + depth, + !context.single_line, + out, + )?; + out.push(')'); + }, - // This is unreachable because we check layout at the start. Still, return - // an error to be safe. + // Return error for native types MoveTypeLayout::Native(..) 
=> { return Err(SafeNativeError::Abort { abort_code: EUNABLE_TO_FORMAT_DELAYED_FIELD, diff --git a/aptos-move/framework/src/natives/type_info.rs b/aptos-move/framework/src/natives/type_info.rs index ae8468db86ca4..b33e3271ee60a 100644 --- a/aptos-move/framework/src/natives/type_info.rs +++ b/aptos-move/framework/src/natives/type_info.rs @@ -21,9 +21,9 @@ fn type_of_internal(struct_tag: &StructTag) -> Result, std: let mut name = struct_tag.name.to_string(); if let Some(first_ty) = struct_tag.type_args.first() { write!(name, "<")?; - write!(name, "{}", first_ty)?; + write!(name, "{}", first_ty.to_canonical_string())?; for ty in struct_tag.type_args.iter().skip(1) { - write!(name, ", {}", ty)?; + write!(name, ", {}", ty.to_canonical_string())?; } write!(name, ">")?; } @@ -57,7 +57,7 @@ fn native_type_of( let type_tag = context.type_to_type_tag(&ty_args[0])?; if context.eval_gas(TYPE_INFO_TYPE_OF_PER_BYTE_IN_STR) > 0.into() { - let type_tag_str = type_tag.to_string(); + let type_tag_str = type_tag.to_canonical_string(); // Ideally, we would charge *before* the `type_to_type_tag()` and `type_tag.to_string()` calls above. // But there are other limits in place that prevent this native from being called with too much work. context @@ -92,7 +92,7 @@ fn native_type_name( context.charge(TYPE_INFO_TYPE_NAME_BASE)?; let type_tag = context.type_to_type_tag(&ty_args[0])?; - let type_name = type_tag.to_string(); + let type_name = type_tag.to_canonical_string(); // TODO: Ideally, we would charge *before* the `type_to_type_tag()` and `type_tag.to_string()` calls above. context.charge(TYPE_INFO_TYPE_NAME_PER_BYTE_IN_STR * NumBytes::new(type_name.len() as u64))?; @@ -159,7 +159,7 @@ mod tests { type_args: vec![TypeTag::Vector(Box::new(TypeTag::U8))], }; - let dummy_as_strings = dummy_st.to_string(); + let dummy_as_strings = dummy_st.to_canonical_string(); let mut dummy_as_strings = dummy_as_strings.split("::"); let dummy_as_type_of = type_of_internal(&dummy_st).unwrap().pop().unwrap(); let dummy_as_type_of: Struct = dummy_as_type_of.cast().unwrap(); diff --git a/aptos-move/framework/src/natives/util.rs b/aptos-move/framework/src/natives/util.rs index da30858dc4edf..a10509eaee8d9 100644 --- a/aptos-move/framework/src/natives/util.rs +++ b/aptos-move/framework/src/natives/util.rs @@ -8,7 +8,9 @@ use aptos_native_interface::{ }; use move_core_types::gas_algebra::NumBytes; use move_vm_runtime::native_functions::NativeFunction; -use move_vm_types::{loaded_data::runtime_types::Type, values::Value}; +use move_vm_types::{ + loaded_data::runtime_types::Type, value_serde::ValueSerDeContext, values::Value, +}; use smallvec::{smallvec, SmallVec}; use std::collections::VecDeque; @@ -40,7 +42,13 @@ fn native_from_bytes( context.charge( UTIL_FROM_BYTES_BASE + UTIL_FROM_BYTES_PER_BYTE * NumBytes::new(bytes.len() as u64), )?; - let val = match Value::simple_deserialize(&bytes, &layout) { + + let function_value_extension = context.function_value_extension(); + let val = match ValueSerDeContext::new() + .with_legacy_signer() + .with_func_args_deserialization(&function_value_extension) + .deserialize(&bytes, &layout) + { Some(val) => val, None => { return Err(SafeNativeError::Abort { diff --git a/aptos-move/framework/src/prover.rs b/aptos-move/framework/src/prover.rs index 5351388a66ad1..44e437a517327 100644 --- a/aptos-move/framework/src/prover.rs +++ b/aptos-move/framework/src/prover.rs @@ -2,21 +2,27 @@ // SPDX-License-Identifier: Apache-2.0 use crate::build_model; +use anyhow::bail; use codespan_reporting::{ 
     diagnostic::Severity,
     term::termcolor::{ColorChoice, StandardStream},
 };
-use log::LevelFilter;
+use log::{info, LevelFilter};
 use move_core_types::account_address::AccountAddress;
-use move_model::metadata::{CompilerVersion, LanguageVersion};
+use move_model::{
+    metadata::{CompilerVersion, LanguageVersion},
+    model::{GlobalEnv, VerificationScope},
+};
+use move_prover::cli::Options;
 use std::{
     collections::{BTreeMap, BTreeSet},
+    fs,
     path::Path,
     time::Instant,
 };
 use tempfile::TempDir;
 
-#[derive(Debug, Clone, clap::Parser, serde::Serialize, serde::Deserialize)]
+#[derive(Debug, Clone, Default, clap::Parser, serde::Serialize, serde::Deserialize)]
 pub struct ProverOptions {
     /// Verbosity level
     #[clap(long, short)]
     pub verbosity: Option<LevelFilter>,
@@ -27,6 +33,12 @@ pub struct ProverOptions {
     #[clap(long, short)]
     pub filter: Option<String>,
 
+    /// Scopes verification to the specified function. This can either be a name of the
+    /// form "mod::func" or simply "func", in the latter case every matching function is
+    /// taken.
+    #[clap(long, short)]
+    pub only: Option<String>,
+
     /// Whether to display additional information in error reports. This may help
     /// debugging but also can make verification slower.
     #[clap(long, short)]
@@ -38,20 +50,30 @@ pub struct ProverOptions {
     pub cvc5: bool,
 
     /// The depth until which stratified functions are expanded.
-    #[clap(long, default_value_t = 6)]
-    pub stratification_depth: usize,
+    #[clap(long)]
+    pub stratification_depth: Option<usize>,
 
     /// A seed for the prover.
-    #[clap(long, default_value_t = 0)]
-    pub random_seed: usize,
+    #[clap(long)]
+    pub random_seed: Option<usize>,
 
     /// The number of cores to use for parallel processing of verification conditions.
-    #[clap(long, default_value_t = 4)]
-    pub proc_cores: usize,
+    #[clap(long)]
+    pub proc_cores: Option<usize>,
+
+    /// The number of shards to split the verification problem into. Shards are
+    /// processed sequentially. This can be used to ease memory pressure for verification
+    /// of large packages.
+    #[clap(long)]
+    pub shards: Option<usize>,
+
+    /// If there are multiple shards, the shard to which verification shall be narrowed.
+    #[clap(long)]
+    pub only_shard: Option<usize>,
 
     /// A (soft) timeout for the solver, per verification condition, in seconds.
-    #[clap(long, default_value_t = 40)]
-    pub vc_timeout: usize,
+    #[clap(long)]
+    pub vc_timeout: Option<usize>,
 
     /// Whether to disable global timeout overwrite.
     /// With this flag set to true, the value set by "--vc-timeout" will be used globally
@@ -84,33 +106,28 @@ pub struct ProverOptions {
     #[clap(long)]
     pub dump: bool,
 
+    /// Whether to benchmark verification. If selected, each verification target in the
+    /// current package will be verified independently with timing recorded. This attempts
+    /// to detect timeouts. A benchmark report will be written to `prover_benchmark.fun_data` in the
+    /// package directory. The command also writes a `prover_benchmark.svg` graphic, which
+    /// is built from the data in the file above, comparing with any other `*.fun_data` files
+    /// in the package directory. Thus, you can rename the data file to something like
+    /// `prover_benchmark_v1.fun_data` and in the next run, compare benchmarks in the `.svg`
+    /// file from multiple runs.
+    #[clap(long = "benchmark")]
+    pub benchmark: bool,
+
+    /// Whether to skip verification of type instantiations of functions. This may miss
+    /// some verification conditions if different type instantiations can create
+    /// different behavior via type reflection or storage access, but can speed up
+    /// verification.
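// Illustrative usage sketch (not part of this patch). With the fields above now
// optional, a caller can request benchmark mode and sharding while letting the
// remaining settings fall back to Prover.toml or built-in defaults; the concrete
// values are made up.
let prover_opts = ProverOptions {
    benchmark: true,
    shards: Some(4),
    only_shard: Some(2),
    vc_timeout: Some(60),
    ..ProverOptions::default()
};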
+ #[clap(long = "skip-instance-check")] + pub skip_instance_check: bool, + #[clap(skip)] pub for_test: bool, } -impl Default for ProverOptions { - fn default() -> Self { - Self { - verbosity: None, - filter: None, - trace: false, - cvc5: false, - stratification_depth: 6, - random_seed: 0, - proc_cores: 4, - vc_timeout: 40, - disallow_global_timeout_to_be_overwritten: false, - check_inconsistency: false, - unconditional_abort_as_inconsistency: false, - keep_loops: false, - loop_unroll: None, - stable_test_output: false, - dump: false, - for_test: false, - } - } -} - impl ProverOptions { /// Runs the move prover on the package. pub fn prove( @@ -123,9 +140,14 @@ impl ProverOptions { language_version: Option, skip_attribute_checks: bool, known_attributes: &BTreeSet, + experiments: &[String], ) -> anyhow::Result<()> { + if compiler_version.is_some_and(|v| v == CompilerVersion::V1) { + return Err(anyhow::Error::msg("Compiler v1 is not supported")); + } let now = Instant::now(); let for_test = self.for_test; + let benchmark = self.benchmark; let mut model = build_model( dev_mode, package_path, @@ -136,8 +158,11 @@ impl ProverOptions { language_version, skip_attribute_checks, known_attributes.clone(), + experiments.to_vec(), )?; - let mut options = self.convert_options(); + let mut options = self.convert_options(package_path)?; + options.language_version = language_version; + options.model_builder.language_version = language_version.unwrap_or_default(); // Need to ensure a distinct output.bpl file for concurrent execution. In non-test // mode, we actually want to use the static output.bpl for debugging purposes let _temp_holder = if for_test { @@ -159,74 +184,90 @@ impl ProverOptions { options.backend.custom_natives = Some(move_prover_boogie_backend::options::CustomNativeOptions { template_bytes: include_bytes!("aptos-natives.bpl").to_vec(), - module_instance_names: vec![( - "0x1::object".to_string(), - "object_instances".to_string(), - true, - )], + module_instance_names: move_prover_boogie_backend::options::custom_native_options(), }); - let mut writer = StandardStream::stderr(ColorChoice::Auto); - if compiler_version.unwrap_or_default() == CompilerVersion::V1 { - move_prover::run_move_prover_with_model(&mut model, &mut writer, options, Some(now))?; + if benchmark { + // Special mode of benchmarking + run_prover_benchmark(package_path, &mut model, options)?; } else { + let mut writer = StandardStream::stderr(ColorChoice::Auto); move_prover::run_move_prover_with_model_v2(&mut model, &mut writer, options, now)?; } Ok(()) } - fn convert_options(self) -> move_prover::cli::Options { + fn convert_options(self, package_path: &Path) -> anyhow::Result { + let prover_toml = package_path.join("Prover.toml"); + let base_opts = if prover_toml.exists() { + Options::create_from_toml_file(prover_toml.to_string_lossy().as_ref())? 
+ } else { + Options::default() + }; let verbosity_level = if let Some(level) = self.verbosity { level } else if self.for_test { LevelFilter::Warn } else { - LevelFilter::Info + base_opts.verbosity_level }; - let opts = move_prover::cli::Options { + let opts = Options { output_path: "".to_string(), verbosity_level, prover: move_prover_bytecode_pipeline::options::ProverOptions { - stable_test_output: self.stable_test_output, + verify_scope: if let Some(name) = self.only { + VerificationScope::Only(name) + } else { + base_opts.prover.verify_scope.clone() + }, + stable_test_output: self.stable_test_output || base_opts.prover.stable_test_output, auto_trace_level: if self.trace { move_prover_bytecode_pipeline::options::AutoTraceLevel::VerifiedFunction } else { - move_prover_bytecode_pipeline::options::AutoTraceLevel::Off + base_opts.prover.auto_trace_level }, report_severity: Severity::Warning, - dump_bytecode: self.dump, + dump_bytecode: self.dump || base_opts.prover.dump_bytecode, dump_cfg: false, - check_inconsistency: self.check_inconsistency, - unconditional_abort_as_inconsistency: self.unconditional_abort_as_inconsistency, - skip_loop_analysis: self.keep_loops, - ..Default::default() + check_inconsistency: self.check_inconsistency + || base_opts.prover.check_inconsistency, + unconditional_abort_as_inconsistency: self.unconditional_abort_as_inconsistency + || base_opts.prover.unconditional_abort_as_inconsistency, + skip_loop_analysis: self.keep_loops || base_opts.prover.skip_loop_analysis, + ..base_opts.prover.clone() }, backend: move_prover_boogie_backend::options::BoogieOptions { - use_cvc5: self.cvc5, + use_cvc5: self.cvc5 || base_opts.backend.use_cvc5, boogie_flags: vec![], - generate_smt: self.dump, - stratification_depth: self.stratification_depth, - proc_cores: self.proc_cores, - vc_timeout: self.vc_timeout, + generate_smt: self.dump || base_opts.backend.generate_smt, + stratification_depth: self + .stratification_depth + .unwrap_or(base_opts.backend.stratification_depth), + proc_cores: self.proc_cores.unwrap_or(base_opts.backend.proc_cores), + shards: self.shards.unwrap_or(base_opts.backend.shards), + only_shard: self.only_shard.or(base_opts.backend.only_shard), + vc_timeout: self.vc_timeout.unwrap_or(base_opts.backend.vc_timeout), global_timeout_overwrite: !self.disallow_global_timeout_to_be_overwritten, - keep_artifacts: self.dump, - stable_test_output: self.stable_test_output, + keep_artifacts: self.dump || base_opts.backend.keep_artifacts, + stable_test_output: self.stable_test_output || base_opts.backend.stable_test_output, z3_trace_file: if self.dump { Some("z3.trace".to_string()) } else { None }, custom_natives: None, - loop_unroll: self.loop_unroll, - ..Default::default() + loop_unroll: self.loop_unroll.or(base_opts.backend.loop_unroll), + skip_instance_check: self.skip_instance_check + || base_opts.backend.skip_instance_check, + ..base_opts.backend }, - ..Default::default() + ..base_opts }; if self.for_test { opts.setup_logging_for_test(); } else { opts.setup_logging() } - opts + Ok(opts) } pub fn default_for_test() -> Self { @@ -236,3 +277,106 @@ impl ProverOptions { } } } + +fn run_prover_benchmark( + package_path: &Path, + env: &mut GlobalEnv, + mut options: Options, +) -> anyhow::Result<()> { + info!("starting prover benchmark"); + // Determine sources and dependencies from the env + let mut sources = BTreeSet::new(); + let mut deps: Vec = vec![]; + for module in env.get_modules() { + let file_name = module.get_source_path().to_string_lossy().to_string(); + if 
module.is_primary_target() {
+            sources.insert(module.get_source_path().to_string_lossy().to_string());
+        } else if let Some(p) = Path::new(&file_name)
+            .parent()
+            .and_then(|p| p.canonicalize().ok())
+        {
+            // The prover doesn't like to have `p` and `p/s` as dep paths, filter those out
+            let p = p.to_string_lossy().to_string();
+            let mut done = false;
+            for d in &mut deps {
+                if p.starts_with(&*d) {
+                    // p is subsumed
+                    done = true;
+                    break;
+                } else if d.starts_with(&p) {
+                    // p is more general or equal to d, swap it out
+                    *d = p.to_string();
+                    done = true;
+                    break;
+                }
+            }
+            if !done {
+                deps.push(p)
+            }
+        } else {
+            bail!("invalid file path `{}`", file_name)
+        }
+    }
+
+    // Enrich the prover options by the aliases in the env
+    for (alias, address) in env.get_address_alias_map() {
+        options.move_named_address_values.push(format!(
+            "{}={}",
+            alias.display(env.symbol_pool()),
+            address.to_hex_literal()
+        ))
+    }
+
+    // Create or override a prover_benchmark.toml in the package dir, reflecting `options`
+    let config_file = package_path.join("prover_benchmark.toml");
+    let toml = toml::to_string(&options)?;
+    std::fs::write(&config_file, toml)?;
+
+    // Args for the benchmark API
+    let mut args = vec![
+        // Command name
+        "bench".to_string(),
+        // Benchmark by function not module
+        "--func".to_string(),
+        // Use as the config the file we derived from `options`
+        "--config".to_string(),
+        config_file.to_string_lossy().to_string(),
+    ];
+
+    // Add deps and sources to args and run the tool
+    for dep in deps {
+        args.push("-d".to_string());
+        args.push(dep)
+    }
+    args.extend(sources);
+    move_prover_lab::benchmark::benchmark(&args);
+
+    // The benchmark stores the result in `.fun_data`, now plot it.
+    // If there are any other `*.fun_data` files, add them to the plot.
+    let mut args = vec![
+        "plot".to_string(),
+        format!(
+            "--out={}",
+            config_file
+                .as_path()
+                .with_extension("svg")
+                .to_string_lossy()
+        ),
+        "--sort".to_string(),
+    ];
+    let main_data_file = config_file
+        .as_path()
+        .with_extension("fun_data")
+        .to_string_lossy()
+        .to_string();
+    args.push(main_data_file.clone());
+    let paths = fs::read_dir(package_path)?;
+    for p in paths.flatten() {
+        let p = p.path().as_path().to_string_lossy().to_string();
+        // Only use this if it is not the main data file we already added
+        if p.ends_with(".fun_data") && !p.ends_with("/prover_benchmark.fun_data") {
+            args.push(p)
+        }
+    }
+    move_prover_lab::plot::plot_svg(&args)
+}
diff --git a/aptos-move/framework/src/release_builder.rs b/aptos-move/framework/src/release_builder.rs
index 94d267fbde680..da7403be3d4c0 100644
--- a/aptos-move/framework/src/release_builder.rs
+++ b/aptos-move/framework/src/release_builder.rs
@@ -28,6 +28,13 @@ pub struct ReleaseOptions {
     /// generated.
     #[clap(long)]
     pub rust_bindings: Vec<String>,
+
+    /// For each package, whether it should be built using the latest language features.
+    /// Generally, packages being deployed to testnet/mainnet need to use default features,
+    /// while those that don't (like aptos-experimental) can use the latest language features.
+    #[clap(long)]
+    pub package_use_latest_language: Vec<bool>,
+
     /// The path to the file where to place the release bundle.
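// Illustrative sketch (not part of this patch). The three vectors above are zipped
// positionally in `create_release`, so they must have the same length: entry i of
// `package_use_latest_language` selects the build options for package i, and an
// empty string in `rust_bindings` skips binding generation for that package. The
// values are made up, and `..Default::default()` assumes the remaining fields have
// usable defaults.
let release_opts = ReleaseOptions {
    packages: vec!["aptos-framework".into(), "aptos-experimental".into()],
    rust_bindings: vec!["framework_sdk.rs".into(), "".into()],
    package_use_latest_language: vec![false, true],
    ..Default::default()
};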
#[clap(long, default_value = "head.mrb", value_parser)] pub output: PathBuf, @@ -41,19 +48,29 @@ impl ReleaseOptions { build_options, packages, rust_bindings, + package_use_latest_language, output, } = self; let mut released_packages = vec![]; let mut source_paths = vec![]; - for (package_path, rust_binding_path) in packages.into_iter().zip(rust_bindings.into_iter()) + for ((package_path, rust_binding_path), use_latest_language) in packages + .into_iter() + .zip(rust_bindings.into_iter()) + .zip(package_use_latest_language.into_iter()) { - let built = BuiltPackage::build(package_path.clone(), build_options.clone()) - .with_context(|| { + let cur_build_options = if use_latest_language { + build_options.clone().set_latest_language() + } else { + build_options.clone() + }; + let built = BuiltPackage::build(package_path.clone(), cur_build_options).with_context( + || { format!( "Failed to build package at path: {}", package_path.display() ) - })?; + }, + )?; if !rust_binding_path.is_empty() { let abis = built .extract_abis() diff --git a/aptos-move/framework/src/release_bundle.rs b/aptos-move/framework/src/release_bundle.rs index 3cc6ee3f65920..f039cbfeb53b1 100644 --- a/aptos-move/framework/src/release_bundle.rs +++ b/aptos-move/framework/src/release_bundle.rs @@ -3,6 +3,7 @@ use crate::{built_package::BuiltPackage, natives::code::PackageMetadata, path_in_crate}; use anyhow::Context; +use aptos_crypto::HashValue; use aptos_types::account_address::AccountAddress; use move_binary_format::{access::ModuleAccess, errors::PartialVMError, CompiledModule}; use move_command_line_common::files::{extension_equals, find_filenames, MOVE_EXTENSION}; @@ -165,7 +166,7 @@ impl ReleasePackage { for_address: AccountAddress, out: PathBuf, ) -> anyhow::Result<()> { - self.generate_script_proposal_impl(for_address, out, false, false, Vec::new()) + self.generate_script_proposal_impl(for_address, out, false, false, None) } pub fn generate_script_proposal_testnet( @@ -173,14 +174,14 @@ impl ReleasePackage { for_address: AccountAddress, out: PathBuf, ) -> anyhow::Result<()> { - self.generate_script_proposal_impl(for_address, out, true, false, Vec::new()) + self.generate_script_proposal_impl(for_address, out, true, false, None) } pub fn generate_script_proposal_multi_step( &self, for_address: AccountAddress, out: PathBuf, - next_execution_hash: Vec, + next_execution_hash: Option, ) -> anyhow::Result<()> { self.generate_script_proposal_impl(for_address, out, true, true, next_execution_hash) } @@ -191,7 +192,7 @@ impl ReleasePackage { out: PathBuf, is_testnet: bool, is_multi_step: bool, - next_execution_hash: Vec, + next_execution_hash: Option, ) -> anyhow::Result<()> { let writer = CodeWriter::new(Loc::default()); emitln!( @@ -225,14 +226,14 @@ impl ReleasePackage { } else { emitln!(writer, "fun main(proposal_id: u64){"); writer.indent(); - Self::generate_next_execution_hash_blob(&writer, for_address, next_execution_hash); + generate_next_execution_hash_blob(&writer, for_address, next_execution_hash); } emitln!(writer, "let code = vector::empty();"); for i in 0..self.code.len() { emitln!(writer, "let chunk{} = ", i); - Self::generate_blob_as_hex_string(&writer, &self.code[i]); + generate_blob_as_hex_string(&writer, &self.code[i]); emitln!(writer, ";"); emitln!(writer, "vector::push_back(&mut code, chunk{});", i); } @@ -253,7 +254,7 @@ impl ReleasePackage { }; let chunk = metadata.drain(0..to_drain).collect::>(); emit!(writer, "let chunk{} = ", i); - Self::generate_blob_as_hex_string(&writer, &chunk); + 
generate_blob_as_hex_string(&writer, &chunk); emitln!(writer, ";") } @@ -272,28 +273,31 @@ impl ReleasePackage { writer.process_result(|s| std::fs::write(&out, s))?; Ok(()) } +} - fn generate_blob_as_hex_string(writer: &CodeWriter, data: &[u8]) { - emit!(writer, "x\""); - for b in data.iter() { - emit!(writer, "{:02x}", b); - } - emit!(writer, "\""); +pub fn generate_blob_as_hex_string(writer: &CodeWriter, data: &[u8]) { + emit!(writer, "x\""); + for b in data.iter() { + emit!(writer, "{:02x}", b); } + emit!(writer, "\""); +} - fn generate_next_execution_hash_blob( - writer: &CodeWriter, - for_address: AccountAddress, - next_execution_hash: Vec, - ) { - if next_execution_hash == "vector::empty()".as_bytes() { +pub fn generate_next_execution_hash_blob( + writer: &CodeWriter, + for_address: AccountAddress, + next_execution_hash: Option, +) { + match next_execution_hash { + None => { emitln!( - writer, - "let framework_signer = aptos_governance::resolve_multi_step_proposal(proposal_id, @{}, {});\n", - for_address, - "vector::empty()", - ); - } else { + writer, + "let framework_signer = aptos_governance::resolve_multi_step_proposal(proposal_id, @{}, {});\n", + for_address, + "x\"\"", + ); + }, + Some(next_execution_hash) => { emitln!( writer, "let framework_signer = aptos_governance::resolve_multi_step_proposal(" @@ -301,14 +305,11 @@ impl ReleasePackage { writer.indent(); emitln!(writer, "proposal_id,"); emitln!(writer, "@{},", for_address); - emit!(writer, "vector["); - for b in next_execution_hash.iter() { - emit!(writer, "{}u8,", b); - } - emitln!(writer, "],"); + generate_blob_as_hex_string(writer, next_execution_hash.as_slice()); + emit!(writer, ","); writer.unindent(); emitln!(writer, ");"); - } + }, } } diff --git a/aptos-move/framework/table-natives/src/lib.rs b/aptos-move/framework/table-natives/src/lib.rs index d306b7a025890..77d0372fa81b7 100644 --- a/aptos-move/framework/table-natives/src/lib.rs +++ b/aptos-move/framework/table-natives/src/lib.rs @@ -28,7 +28,7 @@ pub use move_table_extension::{TableHandle, TableInfo, TableResolver}; use move_vm_runtime::native_functions::NativeFunctionTable; use move_vm_types::{ loaded_data::runtime_types::Type, - value_serde::{deserialize_and_allow_delayed_values, serialize_and_allow_delayed_values}, + value_serde::{FunctionValueExtension, ValueSerDeContext}, values::{GlobalValue, Reference, StructRef, Value}, }; use sha3::{Digest, Sha3_256}; @@ -118,7 +118,10 @@ impl<'a> NativeTableContext<'a> { } /// Computes the change set from a NativeTableContext. - pub fn into_change_set(self) -> PartialVMResult { + pub fn into_change_set( + self, + function_value_extension: &impl FunctionValueExtension, + ) -> PartialVMResult { let NativeTableContext { table_data, .. 
} = self; let TableData { new_tables, @@ -141,10 +144,24 @@ impl<'a> NativeTableContext<'a> { match op { Op::New(val) => { - entries.insert(key, Op::New(serialize_value(&value_layout_info, &val)?)); + entries.insert( + key, + Op::New(serialize_value( + function_value_extension, + &value_layout_info, + &val, + )?), + ); }, Op::Modify(val) => { - entries.insert(key, Op::Modify(serialize_value(&value_layout_info, &val)?)); + entries.insert( + key, + Op::Modify(serialize_value( + function_value_extension, + &value_layout_info, + &val, + )?), + ); }, Op::Delete => { entries.insert(key, Op::Delete); @@ -204,26 +221,33 @@ impl LayoutInfo { impl Table { fn get_or_create_global_value( &mut self, - context: &NativeTableContext, + function_value_extension: &dyn FunctionValueExtension, + table_context: &NativeTableContext, key: Vec, ) -> PartialVMResult<(&mut GlobalValue, Option>)> { Ok(match self.content.entry(key) { Entry::Vacant(entry) => { // If there is an identifier mapping, we need to pass layout to // ensure it gets recorded. - let data = context.resolver.resolve_table_entry_bytes_with_layout( - &self.handle, - entry.key(), - if self.value_layout_info.has_identifier_mappings { - Some(&self.value_layout_info.layout) - } else { - None - }, - )?; + let data = table_context + .resolver + .resolve_table_entry_bytes_with_layout( + &self.handle, + entry.key(), + if self.value_layout_info.has_identifier_mappings { + Some(&self.value_layout_info.layout) + } else { + None + }, + )?; let (gv, loaded) = match data { Some(val_bytes) => { - let val = deserialize_value(&self.value_layout_info, &val_bytes)?; + let val = deserialize_value( + function_value_extension, + &val_bytes, + &self.value_layout_info, + )?; ( GlobalValue::cached(val)?, Some(NumBytes::new(val_bytes.len() as u64)), @@ -341,6 +365,7 @@ fn native_add_box( context.charge(ADD_BOX_BASE)?; + let function_value_extension = context.function_value_extension(); let table_context = context.extensions().get::(); let mut table_data = table_context.table_data.borrow_mut(); @@ -350,10 +375,18 @@ fn native_add_box( let table = table_data.get_or_create_table(context, handle, &ty_args[0], &ty_args[2])?; - let key_bytes = serialize_key(&table.key_layout, &key)?; + let key_bytes = serialize_key(&function_value_extension, &table.key_layout, &key)?; let key_cost = ADD_BOX_PER_BYTE_SERIALIZED * NumBytes::new(key_bytes.len() as u64); - let (gv, loaded) = table.get_or_create_global_value(table_context, key_bytes)?; + let (gv, loaded) = + table.get_or_create_global_value(&function_value_extension, table_context, key_bytes)?; + let mem_usage = gv.view().map(|val| { + u64::from( + context + .abs_val_gas_params() + .abstract_heap_size(&val, context.gas_feature_version()), + ) + }); let res = match gv.move_to(val) { Ok(_) => Ok(smallvec![]), @@ -366,6 +399,9 @@ fn native_add_box( // TODO(Gas): Figure out a way to charge this earlier. 
context.charge(key_cost)?; + if let Some(amount) = mem_usage { + context.use_heap_memory(amount); + } charge_load_cost(context, loaded)?; res @@ -381,6 +417,7 @@ fn native_borrow_box( context.charge(BORROW_BOX_BASE)?; + let function_value_extension = context.function_value_extension(); let table_context = context.extensions().get::(); let mut table_data = table_context.table_data.borrow_mut(); @@ -389,10 +426,18 @@ fn native_borrow_box( let table = table_data.get_or_create_table(context, handle, &ty_args[0], &ty_args[2])?; - let key_bytes = serialize_key(&table.key_layout, &key)?; + let key_bytes = serialize_key(&function_value_extension, &table.key_layout, &key)?; let key_cost = BORROW_BOX_PER_BYTE_SERIALIZED * NumBytes::new(key_bytes.len() as u64); - let (gv, loaded) = table.get_or_create_global_value(table_context, key_bytes)?; + let (gv, loaded) = + table.get_or_create_global_value(&function_value_extension, table_context, key_bytes)?; + let mem_usage = gv.view().map(|val| { + u64::from( + context + .abs_val_gas_params() + .abstract_heap_size(&val, context.gas_feature_version()), + ) + }); let res = match gv.borrow_global() { Ok(ref_val) => Ok(smallvec![ref_val]), @@ -405,6 +450,9 @@ fn native_borrow_box( // TODO(Gas): Figure out a way to charge this earlier. context.charge(key_cost)?; + if let Some(amount) = mem_usage { + context.use_heap_memory(amount); + } charge_load_cost(context, loaded)?; res @@ -420,6 +468,7 @@ fn native_contains_box( context.charge(CONTAINS_BOX_BASE)?; + let function_value_extension = context.function_value_extension(); let table_context = context.extensions().get::(); let mut table_data = table_context.table_data.borrow_mut(); @@ -428,16 +477,27 @@ fn native_contains_box( let table = table_data.get_or_create_table(context, handle, &ty_args[0], &ty_args[2])?; - let key_bytes = serialize_key(&table.key_layout, &key)?; + let key_bytes = serialize_key(&function_value_extension, &table.key_layout, &key)?; let key_cost = CONTAINS_BOX_PER_BYTE_SERIALIZED * NumBytes::new(key_bytes.len() as u64); - let (gv, loaded) = table.get_or_create_global_value(table_context, key_bytes)?; + let (gv, loaded) = + table.get_or_create_global_value(&function_value_extension, table_context, key_bytes)?; + let mem_usage = gv.view().map(|val| { + u64::from( + context + .abs_val_gas_params() + .abstract_heap_size(&val, context.gas_feature_version()), + ) + }); let exists = Value::bool(gv.exists()?); drop(table_data); // TODO(Gas): Figure out a way to charge this earlier. 
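// A side-by-side summary (illustration only, not part of this patch) of the
// serialization change running through this file and the event/util natives earlier
// in this diff: the direct helpers are replaced by the ValueSerDeContext builder,
// which can also resolve captured closure arguments via a FunctionValueExtension.
//
//   Before:  val.simple_serialize(layout)
//            deserialize_and_allow_delayed_values(bytes, layout)
//
//   After:   ValueSerDeContext::new()
//                .with_func_args_deserialization(&function_value_extension)
//                .serialize(&val, layout)?
//            ValueSerDeContext::new()
//                .with_func_args_deserialization(&function_value_extension)
//                .with_delayed_fields_serde()
//                .deserialize(bytes, layout)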
context.charge(key_cost)?; + if let Some(amount) = mem_usage { + context.use_heap_memory(amount); + } charge_load_cost(context, loaded)?; Ok(smallvec![exists]) @@ -453,6 +513,7 @@ fn native_remove_box( context.charge(REMOVE_BOX_BASE)?; + let function_value_extension = context.function_value_extension(); let table_context = context.extensions().get::(); let mut table_data = table_context.table_data.borrow_mut(); @@ -461,10 +522,19 @@ fn native_remove_box( let table = table_data.get_or_create_table(context, handle, &ty_args[0], &ty_args[2])?; - let key_bytes = serialize_key(&table.key_layout, &key)?; + let key_bytes = serialize_key(&function_value_extension, &table.key_layout, &key)?; let key_cost = REMOVE_BOX_PER_BYTE_SERIALIZED * NumBytes::new(key_bytes.len() as u64); - let (gv, loaded) = table.get_or_create_global_value(table_context, key_bytes)?; + let (gv, loaded) = + table.get_or_create_global_value(&function_value_extension, table_context, key_bytes)?; + let mem_usage = gv.view().map(|val| { + u64::from( + context + .abs_val_gas_params() + .abstract_heap_size(&val, context.gas_feature_version()), + ) + }); + let res = match gv.move_from() { Ok(val) => Ok(smallvec![val]), Err(_) => Err(SafeNativeError::Abort { @@ -476,6 +546,9 @@ fn native_remove_box( // TODO(Gas): Figure out a way to charge this earlier. context.charge(key_cost)?; + if let Some(amount) = mem_usage { + context.use_heap_memory(amount); + } charge_load_cost(context, loaded)?; res @@ -528,34 +601,55 @@ fn get_table_handle(table: &StructRef) -> PartialVMResult { Ok(TableHandle(handle)) } -fn serialize_key(layout: &MoveTypeLayout, key: &Value) -> PartialVMResult> { - key.simple_serialize(layout) +fn serialize_key( + function_value_extension: &dyn FunctionValueExtension, + layout: &MoveTypeLayout, + key: &Value, +) -> PartialVMResult> { + ValueSerDeContext::new() + .with_func_args_deserialization(function_value_extension) + .serialize(key, layout)? .ok_or_else(|| partial_extension_error("cannot serialize table key")) } fn serialize_value( + function_value_extension: &dyn FunctionValueExtension, layout_info: &LayoutInfo, val: &Value, ) -> PartialVMResult<(Bytes, Option>)> { let serialization_result = if layout_info.has_identifier_mappings { // Value contains delayed fields, so we should be able to serialize it. - serialize_and_allow_delayed_values(val, layout_info.layout.as_ref())? + ValueSerDeContext::new() + .with_delayed_fields_serde() + .with_func_args_deserialization(function_value_extension) + .serialize(val, layout_info.layout.as_ref())? .map(|bytes| (bytes.into(), Some(layout_info.layout.clone()))) } else { // No delayed fields, make sure serialization fails if there are any // native values. - val.simple_serialize(layout_info.layout.as_ref()) + ValueSerDeContext::new() + .with_func_args_deserialization(function_value_extension) + .serialize(val, layout_info.layout.as_ref())? 
.map(|bytes| (bytes.into(), None)) }; serialization_result.ok_or_else(|| partial_extension_error("cannot serialize table value")) } -fn deserialize_value(layout_info: &LayoutInfo, bytes: &[u8]) -> PartialVMResult { +fn deserialize_value( + function_value_extension: &dyn FunctionValueExtension, + bytes: &[u8], + layout_info: &LayoutInfo, +) -> PartialVMResult { let layout = layout_info.layout.as_ref(); let deserialization_result = if layout_info.has_identifier_mappings { - deserialize_and_allow_delayed_values(bytes, layout) + ValueSerDeContext::new() + .with_func_args_deserialization(function_value_extension) + .with_delayed_fields_serde() + .deserialize(bytes, layout) } else { - Value::simple_deserialize(bytes, layout) + ValueSerDeContext::new() + .with_func_args_deserialization(function_value_extension) + .deserialize(bytes, layout) }; deserialization_result.ok_or_else(|| partial_extension_error("cannot deserialize table value")) } diff --git a/aptos-move/framework/tests/move_prover_tests.rs b/aptos-move/framework/tests/move_prover_tests.rs index 466aea472db31..99e54d3c6120f 100644 --- a/aptos-move/framework/tests/move_prover_tests.rs +++ b/aptos-move/framework/tests/move_prover_tests.rs @@ -2,6 +2,8 @@ // SPDX-License-Identifier: Apache-2.0 use aptos_framework::{extended_checks, prover::ProverOptions}; +use move_binary_format::file_format_common::VERSION_DEFAULT; +use move_model::metadata::{CompilerVersion, LanguageVersion}; use std::{collections::BTreeMap, path::PathBuf}; const ENV_TEST_INCONSISTENCY: &str = "MVP_TEST_INCONSISTENCY"; @@ -27,7 +29,11 @@ pub fn read_env_var(v: &str) -> String { std::env::var(v).unwrap_or_else(|_| String::new()) } -pub fn run_prover_for_pkg(path_to_pkg: impl Into) { +pub fn run_prover_for_pkg( + path_to_pkg: impl Into, + shards: usize, + only_shard: Option, +) { let pkg_path = path_in_crate(path_to_pkg); let mut options = ProverOptions::default_for_test(); let no_tools = read_env_var("BOOGIE_EXE").is_empty() @@ -45,44 +51,68 @@ pub fn run_prover_for_pkg(path_to_pkg: impl Into) { let unconditional_abort_inconsistency_flag = read_env_var(ENV_TEST_UNCONDITIONAL_ABORT_AS_INCONSISTENCY) == "1"; let disallow_timeout_overwrite = read_env_var(ENV_TEST_DISALLOW_TIMEOUT_OVERWRITE) == "1"; + options.shards = Some(shards); + options.only_shard = only_shard; options.check_inconsistency = inconsistency_flag; options.unconditional_abort_as_inconsistency = unconditional_abort_inconsistency_flag; options.disallow_global_timeout_to_be_overwritten = disallow_timeout_overwrite; options.vc_timeout = read_env_var(ENV_TEST_VC_TIMEOUT) .parse::() - .unwrap_or(options.vc_timeout); + .ok() + .or(options.vc_timeout); let skip_attribute_checks = false; options .prove( false, pkg_path.as_path(), BTreeMap::default(), - None, - None, - None, + Some(VERSION_DEFAULT), + Some(CompilerVersion::latest_stable()), + Some(LanguageVersion::latest_stable()), skip_attribute_checks, extended_checks::get_all_attribute_names(), + &[], ) .unwrap() } } #[test] -fn move_framework_prover_tests() { - run_prover_for_pkg("aptos-framework"); +fn move_framework_prover_tests_shard1() { + run_prover_for_pkg("aptos-framework", 5, Some(1)); +} + +#[test] +fn move_framework_prover_tests_shard2() { + run_prover_for_pkg("aptos-framework", 5, Some(2)); +} + +#[test] +fn move_framework_prover_tests_shard3() { + run_prover_for_pkg("aptos-framework", 5, Some(3)); +} + +#[test] +fn move_framework_prover_tests_shard4() { + run_prover_for_pkg("aptos-framework", 5, Some(4)); +} + +#[test] +fn 
move_framework_prover_tests_shard5() { + run_prover_for_pkg("aptos-framework", 5, Some(5)); } #[test] fn move_token_prover_tests() { - run_prover_for_pkg("aptos-token"); + run_prover_for_pkg("aptos-token", 1, None); } #[test] fn move_aptos_stdlib_prover_tests() { - run_prover_for_pkg("aptos-stdlib"); + run_prover_for_pkg("aptos-stdlib", 1, None); } #[test] fn move_stdlib_prover_tests() { - run_prover_for_pkg("move-stdlib"); + run_prover_for_pkg("move-stdlib", 1, None); } diff --git a/aptos-move/framework/tests/move_unit_test.rs b/aptos-move/framework/tests/move_unit_test.rs index 5a896737e1c38..2d90498e931bc 100644 --- a/aptos-move/framework/tests/move_unit_test.rs +++ b/aptos-move/framework/tests/move_unit_test.rs @@ -2,23 +2,21 @@ // Parts of the project are originally copyright © Meta Platforms, Inc. // SPDX-License-Identifier: Apache-2.0 -use aptos_framework::{extended_checks, path_in_crate}; +use aptos_framework::{extended_checks, path_in_crate, BuildOptions}; use aptos_gas_schedule::{MiscGasParameters, NativeGasParameters, LATEST_GAS_FEATURE_VERSION}; use aptos_types::on_chain_config::{ aptos_test_feature_flags_genesis, Features, TimedFeaturesBuilder, }; use aptos_vm::natives; use move_cli::base::test::{run_move_unit_tests, UnitTestResult}; -use move_command_line_common::env::get_move_compiler_v2_from_env; -use move_model::metadata::CompilerVersion; use move_package::CompilerConfig; use move_unit_test::UnitTestingConfig; use move_vm_runtime::native_functions::NativeFunctionTable; use tempfile::tempdir; -fn run_tests_for_pkg(path_to_pkg: impl Into) { +fn run_tests_for_pkg(path_to_pkg: impl Into, use_latest_language: bool) { let pkg_path = path_in_crate(path_to_pkg); - let mut compiler_config = CompilerConfig { + let compiler_config = CompilerConfig { known_attributes: extended_checks::get_all_attribute_names().clone(), ..Default::default() }; @@ -29,43 +27,40 @@ fn run_tests_for_pkg(path_to_pkg: impl Into) { full_model_generation: true, // Run extended checks also on test code ..Default::default() }; + if use_latest_language { + let latest_build_options = BuildOptions::default().set_latest_language(); + build_config.compiler_config.bytecode_version = latest_build_options.bytecode_version; + build_config.compiler_config.language_version = latest_build_options.language_version; + } - let mut ok = run_move_unit_tests( + let utc = UnitTestingConfig { + filter: std::env::var("TEST_FILTER").ok(), + report_statistics: matches!(std::env::var("REPORT_STATS"), Ok(s) if s.as_str() == "1"), + ..Default::default() + }; + let ok = run_move_unit_tests( &pkg_path, build_config.clone(), // TODO(Gas): double check if this is correct - UnitTestingConfig::default_with_bound(Some(100_000)), + utc, aptos_test_natives(), aptos_test_feature_flags_genesis(), + /* gas limit */ Some(100_000), /* cost_table */ None, /* compute_coverage */ false, &mut std::io::stdout(), - ) - .unwrap(); - if ok != UnitTestResult::Success { + ); + if ok.is_err() || ok.is_ok_and(|r| r == UnitTestResult::Failure) { panic!("move unit tests failed") } - if get_move_compiler_v2_from_env() { - // Run test against v2 when MOVE_COMPILER_V2 is set - compiler_config.compiler_version = Some(CompilerVersion::V2_0); - build_config.compiler_config = compiler_config; - ok = run_move_unit_tests( - &pkg_path, - build_config, - UnitTestingConfig::default_with_bound(Some(100_000)), - aptos_test_natives(), - aptos_test_feature_flags_genesis(), - /* cost_table */ None, - /* compute_coverage */ false, - &mut std::io::stdout(), - ) - .unwrap(); - } - if 
ok != UnitTestResult::Success { - panic!("move unit tests failed for compiler v2") - } } +/// TODO: per @vgao1996: +/// - There should be only one ground truth of `aptos_test_natives`. +/// But rn it's defined here, in `move-examples` and in `framework-experimental`. +/// - This function updates a global config (in `configure_extended_checks_for_unit_test`) +/// then returns a list natives. This pattern is confusing. +/// More discussion: https://github.com/aptos-labs/aptos-core/pull/15997#discussion_r1994469668 pub fn aptos_test_natives() -> NativeFunctionTable { // By side effect, configure for unit tests natives::configure_for_unit_test(); @@ -82,25 +77,30 @@ pub fn aptos_test_natives() -> NativeFunctionTable { #[test] fn move_framework_unit_tests() { - run_tests_for_pkg("aptos-framework"); + run_tests_for_pkg("aptos-framework", false); } #[test] fn move_aptos_stdlib_unit_tests() { - run_tests_for_pkg("aptos-stdlib"); + run_tests_for_pkg("aptos-stdlib", false); } #[test] fn move_stdlib_unit_tests() { - run_tests_for_pkg("move-stdlib"); + run_tests_for_pkg("move-stdlib", false); } #[test] fn move_token_unit_tests() { - run_tests_for_pkg("aptos-token"); + run_tests_for_pkg("aptos-token", false); } #[test] fn move_token_objects_unit_tests() { - run_tests_for_pkg("aptos-token-objects"); + run_tests_for_pkg("aptos-token-objects", false); +} + +#[test] +fn move_experimental_unit_tests() { + run_tests_for_pkg("aptos-experimental", true); } diff --git a/third_party/move/move-compiler-v2/legacy-move-compiler/.gitignore b/third_party/move/move-compiler-v2/legacy-move-compiler/.gitignore new file mode 100644 index 0000000000000..1f57b975cdf58 --- /dev/null +++ b/third_party/move/move-compiler-v2/legacy-move-compiler/.gitignore @@ -0,0 +1 @@ +/output diff --git a/third_party/move/move-compiler-v2/legacy-move-compiler/Cargo.toml b/third_party/move/move-compiler-v2/legacy-move-compiler/Cargo.toml new file mode 100644 index 0000000000000..88d9c55cc4f8b --- /dev/null +++ b/third_party/move/move-compiler-v2/legacy-move-compiler/Cargo.toml @@ -0,0 +1,28 @@ +[package] +name = "legacy-move-compiler" +version = "0.0.1" +authors = ["Diem Association "] +description = "Legacy Move Compiler Passes" +publish = false +edition = "2021" +license = "Apache-2.0" + +[dependencies] +anyhow = { workspace = true } +clap = { workspace = true, features = ["derive"] } +codespan-reporting = { workspace = true } +hex = { workspace = true } +once_cell = { workspace = true } +petgraph = { workspace = true } +regex = { workspace = true } +tempfile = { workspace = true } + +bcs = { workspace = true } + +move-binary-format = { workspace = true } +move-bytecode-source-map = { workspace = true } +move-bytecode-verifier = { workspace = true } +move-command-line-common = { workspace = true } +move-core-types = { workspace = true } +move-ir-types = { workspace = true } +move-symbol-pool = { workspace = true } diff --git a/third_party/move/move-compiler-v2/legacy-move-compiler/src/command_line/compiler.rs b/third_party/move/move-compiler-v2/legacy-move-compiler/src/command_line/compiler.rs new file mode 100644 index 0000000000000..11a91117baddc --- /dev/null +++ b/third_party/move/move-compiler-v2/legacy-move-compiler/src/command_line/compiler.rs @@ -0,0 +1,610 @@ +// Copyright (c) The Diem Core Contributors +// Copyright (c) The Move Contributors +// SPDX-License-Identifier: Apache-2.0 + +use crate::{ + command_line::{DEFAULT_OUTPUT_DIR, MOVE_COMPILED_INTERFACES_DIR}, + compiled_unit, + compiled_unit::AnnotatedCompiledUnit, + 
diagnostics::{codes::Severity, *}, + expansion, interface_generator, parser, + parser::{comments::*, *}, + shared::{ + ast_debug, CompilationEnv, Flags, IndexedPackagePath, NamedAddressMap, NamedAddressMaps, + NumericalAddress, PackagePaths, + }, + unit_test, verification, +}; +use move_command_line_common::files::{ + extension_equals, find_filenames, MOVE_COMPILED_EXTENSION, MOVE_EXTENSION, SOURCE_MAP_EXTENSION, +}; +use move_core_types::language_storage::ModuleId as CompiledModuleId; +use move_symbol_pool::Symbol; +use std::{ + collections::{BTreeMap, BTreeSet}, + fmt::Debug, + fs, + fs::File, + io::{Read, Write}, + path::{Path, PathBuf}, +}; +use tempfile::NamedTempFile; + +//************************************************************************************************** +// Definitions +//************************************************************************************************** + +pub struct Compiler<'a> { + maps: NamedAddressMaps, + targets: Vec, + deps: Vec, + interface_files_dir_opt: Option, + pre_compiled_lib: Option<&'a FullyCompiledProgram>, + compiled_module_named_address_mapping: BTreeMap, + flags: Flags, + known_attributes: BTreeSet, +} + +pub struct SteppedCompiler<'a, const P: Pass> { + compilation_env: CompilationEnv, + pre_compiled_lib: Option<&'a FullyCompiledProgram>, + program: Option, +} + +pub type Pass = u8; +pub const EMPTY_COMPILER: Pass = 0; +pub const PASS_PARSER: Pass = 1; +pub const PASS_EXPANSION: Pass = 2; + +#[derive(Debug)] +enum PassResult { + Parser(parser::ast::Program), + Expansion(expansion::ast::Program), +} + +#[derive(Clone)] +pub struct FullyCompiledProgram { + // TODO don't store this... + pub files: FilesSourceText, + pub parser: parser::ast::Program, + pub expansion: expansion::ast::Program, +} + +//************************************************************************************************** +// Entry points and impls +//************************************************************************************************** + +impl<'a> Compiler<'a> { + pub fn from_package_paths + Debug, NamedAddress: Into + Debug>( + targets: Vec>, + deps: Vec>, + flags: Flags, + known_attributes: &BTreeSet, + ) -> Self { + fn indexed_scopes( + maps: &mut NamedAddressMaps, + all_pkgs: Vec + Debug, impl Into + Debug>>, + ) -> Vec { + let mut idx_paths = vec![]; + for PackagePaths { + name, + paths, + named_address_map, + } in all_pkgs + { + let idx = maps.insert( + named_address_map + .into_iter() + .map(|(k, v)| (k.into(), v)) + .collect::(), + ); + idx_paths.extend(paths.into_iter().map(|path| IndexedPackagePath { + package: name, + path: path.into(), + named_address_map: idx, + })) + } + idx_paths + } + let mut maps = NamedAddressMaps::new(); + let targets = indexed_scopes(&mut maps, targets); + let deps = indexed_scopes(&mut maps, deps); + + Self { + maps, + targets, + deps, + interface_files_dir_opt: None, + pre_compiled_lib: None, + compiled_module_named_address_mapping: BTreeMap::new(), + flags, + known_attributes: known_attributes.clone(), + } + } + + pub fn from_files + Debug, NamedAddress: Into + Clone + Debug>( + targets: Vec, + deps: Vec, + named_address_map: BTreeMap, + flags: Flags, + known_attributes: &BTreeSet, + ) -> Self { + let targets = vec![PackagePaths { + name: None, + paths: targets, + named_address_map: named_address_map.clone(), + }]; + let deps = vec![PackagePaths { + name: None, + paths: deps, + named_address_map, + }]; + Self::from_package_paths(targets, deps, flags, known_attributes) + } + + pub fn 
set_interface_files_dir(mut self, dir: String) -> Self { + assert!(self.interface_files_dir_opt.is_none()); + self.interface_files_dir_opt = Some(dir); + self + } + + pub fn set_interface_files_dir_opt(mut self, dir_opt: Option) -> Self { + assert!(self.interface_files_dir_opt.is_none()); + self.interface_files_dir_opt = dir_opt; + self + } + + pub fn set_pre_compiled_lib(mut self, pre_compiled_lib: &'a FullyCompiledProgram) -> Self { + assert!(self.pre_compiled_lib.is_none()); + self.pre_compiled_lib = Some(pre_compiled_lib); + self + } + + pub fn set_pre_compiled_lib_opt( + mut self, + pre_compiled_lib: Option<&'a FullyCompiledProgram>, + ) -> Self { + assert!(self.pre_compiled_lib.is_none()); + self.pre_compiled_lib = pre_compiled_lib; + self + } + + pub fn set_compiled_module_named_address_mapping( + mut self, + compiled_module_named_address_mapping: BTreeMap, + ) -> Self { + assert!(self.compiled_module_named_address_mapping.is_empty()); + self.compiled_module_named_address_mapping = compiled_module_named_address_mapping; + self + } + + pub fn run( + self, + ) -> anyhow::Result<( + FilesSourceText, + Result<(CommentMap, SteppedCompiler<'a, TARGET>), Diagnostics>, + )> { + let Self { + maps, + targets, + mut deps, + interface_files_dir_opt, + pre_compiled_lib, + compiled_module_named_address_mapping, + flags, + known_attributes, + } = self; + generate_interface_files_for_deps( + &mut deps, + interface_files_dir_opt, + &compiled_module_named_address_mapping, + )?; + let mut compilation_env = CompilationEnv::new(flags, known_attributes); + let (source_text, pprog_and_comments_res) = + parse_program(&mut compilation_env, maps, targets, deps)?; + let res: Result<_, Diagnostics> = pprog_and_comments_res.and_then(|(pprog, comments)| { + SteppedCompiler::new_at_parser(compilation_env, pre_compiled_lib, pprog) + .run::() + .map(|compiler| (comments, compiler)) + }); + Ok((source_text, res)) + } +} + +impl<'a, const P: Pass> SteppedCompiler<'a, P> { + fn run_impl(self) -> Result, Diagnostics> { + assert!(P > EMPTY_COMPILER); + assert!(self.program.is_some()); + assert!(self.program.as_ref().unwrap().equivalent_pass() == P); + assert!( + P <= PASS_EXPANSION, + "Invalid pass for run_to. Initial pass is too large." + ); + assert!( + P <= TARGET, + "Invalid pass for run_to. Target pass precedes the current pass" + ); + let Self { + mut compilation_env, + pre_compiled_lib, + program, + } = self; + let new_prog = run( + &mut compilation_env, + pre_compiled_lib, + program.unwrap(), + TARGET, + |_, _| (), + )?; + assert!(new_prog.equivalent_pass() == TARGET); + Ok(SteppedCompiler { + compilation_env, + pre_compiled_lib, + program: Some(new_prog), + }) + } + + pub fn compilation_env(&mut self) -> &mut CompilationEnv { + &mut self.compilation_env + } +} + +macro_rules! 
ast_stepped_compilers { + ($(($pass:ident, $mod:ident, $result:ident, $at_ast:ident, $new:ident)),*) => { + impl<'a> SteppedCompiler<'a, EMPTY_COMPILER> { + $( + pub fn $at_ast(self, ast: $mod::ast::Program) -> SteppedCompiler<'a, {$pass}> { + let Self { + compilation_env, + pre_compiled_lib, + program, + } = self; + assert!(program.is_none()); + SteppedCompiler::$new( + compilation_env, + pre_compiled_lib, + ast + ) + } + )* + } + + $( + impl<'a> SteppedCompiler<'a, {$pass}> { + fn $new( + compilation_env: CompilationEnv, + pre_compiled_lib: Option<&'a FullyCompiledProgram>, + ast: $mod::ast::Program, + ) -> Self { + Self { + compilation_env, + pre_compiled_lib, + program: Some(PassResult::$result(ast)), + } + } + + pub fn run( + self + ) -> Result, Diagnostics> { + self.run_impl() + } + + pub fn into_ast(self) -> (SteppedCompiler<'a, EMPTY_COMPILER>, $mod::ast::Program) { + let Self { + compilation_env, + pre_compiled_lib, + program, + } = self; + let ast = match program { + Some(PassResult::$result(ast)) => ast, + _ => panic!(), + }; + let next = SteppedCompiler { + compilation_env, + pre_compiled_lib, + program: None, + }; + (next, ast) + } + } + )* + }; +} + +ast_stepped_compilers!( + (PASS_PARSER, parser, Parser, at_parser, new_at_parser), + ( + PASS_EXPANSION, + expansion, + Expansion, + at_expansion, + new_at_expansion + ) +); + +//************************************************************************************************** +// Utils +//************************************************************************************************** + +macro_rules! dir_path { + ($($dir:expr),+) => {{ + let mut p = PathBuf::new(); + $(p.push($dir);)+ + p + }}; +} + +macro_rules! file_path { + ($dir:expr, $name:expr, $ext:expr) => {{ + let mut p = PathBuf::from($dir); + p.push($name); + p.set_extension($ext); + p + }}; +} + +/// Runs the bytecode verifier on the compiled units +/// Fails if the bytecode verifier errors +pub fn sanity_check_compiled_units( + files: FilesSourceText, + compiled_units: &[AnnotatedCompiledUnit], +) { + let ice_errors = compiled_unit::verify_units(compiled_units); + if !ice_errors.is_empty() { + report_diagnostics(&files, ice_errors) + } +} + +/// Given a file map and a set of compiled programs, saves the compiled programs to disk +pub fn output_compiled_units( + bytecode_version: Option, + emit_source_maps: bool, + files: FilesSourceText, + compiled_units: Vec, + out_dir: &str, +) -> anyhow::Result<()> { + const SCRIPT_SUB_DIR: &str = "scripts"; + const MODULE_SUB_DIR: &str = "modules"; + fn num_digits(n: usize) -> usize { + format!("{}", n).len() + } + fn format_idx(idx: usize, width: usize) -> String { + format!("{:0width$}", idx, width = width) + } + + macro_rules! emit_unit { + ($path:ident, $unit:ident) => {{ + if emit_source_maps { + $path.set_extension(SOURCE_MAP_EXTENSION); + fs::write($path.as_path(), &$unit.serialize_source_map())?; + } + + $path.set_extension(MOVE_COMPILED_EXTENSION); + fs::write($path.as_path(), &$unit.serialize(bytecode_version))? 
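+            // Note that `$path` is reused for both outputs: the source-map extension is
+            // set first (when source maps are requested), then replaced with the
+            // compiled-module extension before the bytecode itself is written.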
+ }}; + } + + let ice_errors = compiled_unit::verify_units(&compiled_units); + let (modules, scripts): (Vec<_>, Vec<_>) = compiled_units + .into_iter() + .partition(|u| matches!(u, AnnotatedCompiledUnit::Module(_))); + + // modules + if !modules.is_empty() { + std::fs::create_dir_all(dir_path!(out_dir, MODULE_SUB_DIR))?; + } + let digit_width = num_digits(modules.len()); + for (idx, unit) in modules.into_iter().enumerate() { + let unit = unit.into_compiled_unit(); + let mut path = dir_path!( + out_dir, + MODULE_SUB_DIR, + format!("{}_{}", format_idx(idx, digit_width), unit.name()) + ); + emit_unit!(path, unit); + } + + // scripts + if !scripts.is_empty() { + std::fs::create_dir_all(dir_path!(out_dir, SCRIPT_SUB_DIR))?; + } + for unit in scripts { + let unit = unit.into_compiled_unit(); + let mut path = dir_path!(out_dir, SCRIPT_SUB_DIR, unit.name().as_str()); + emit_unit!(path, unit); + } + + if !ice_errors.is_empty() { + report_diagnostics(&files, ice_errors) + } + Ok(()) +} + +fn generate_interface_files_for_deps( + deps: &mut Vec, + interface_files_dir_opt: Option, + module_to_named_address: &BTreeMap, +) -> anyhow::Result<()> { + let interface_files_paths = + generate_interface_files(deps, interface_files_dir_opt, module_to_named_address, true)?; + deps.extend(interface_files_paths); + // Remove bytecode files + deps.retain(|p| !p.path.as_str().ends_with(MOVE_COMPILED_EXTENSION)); + Ok(()) +} + +pub fn generate_interface_files( + mv_file_locations: &mut [IndexedPackagePath], + interface_files_dir_opt: Option, + module_to_named_address: &BTreeMap, + separate_by_hash: bool, +) -> anyhow::Result> { + let mv_files = { + let mut v = vec![]; + let (mv_magic_files, other_file_locations): (Vec<_>, Vec<_>) = + mv_file_locations.iter().cloned().partition(|s| { + Path::new(s.path.as_str()).is_file() && has_compiled_module_magic_number(&s.path) + }); + v.extend(mv_magic_files); + for IndexedPackagePath { + package, + path, + named_address_map, + } in other_file_locations + { + v.extend( + find_filenames(&[path.as_str()], |path| { + extension_equals(path, MOVE_COMPILED_EXTENSION) + })? + .into_iter() + .map(|path| IndexedPackagePath { + package, + path: path.into(), + named_address_map, + }), + ); + } + v + }; + if mv_files.is_empty() { + return Ok(vec![]); + } + + let interface_files_dir = + interface_files_dir_opt.unwrap_or_else(|| DEFAULT_OUTPUT_DIR.to_string()); + let interface_sub_dir = dir_path!(interface_files_dir, MOVE_COMPILED_INTERFACES_DIR); + let all_addr_dir = if separate_by_hash { + use std::{ + collections::hash_map::DefaultHasher, + hash::{Hash, Hasher}, + }; + const HASH_DELIM: &str = "%|%"; + + let mut hasher = DefaultHasher::new(); + mv_files.len().hash(&mut hasher); + HASH_DELIM.hash(&mut hasher); + for IndexedPackagePath { path, .. 
} in &mv_files { + std::fs::read(path.as_str())?.hash(&mut hasher); + HASH_DELIM.hash(&mut hasher); + } + + let mut dir = interface_sub_dir; + dir.push(format!("{:020}", hasher.finish())); + dir + } else { + interface_sub_dir + }; + + let mut result = vec![]; + for IndexedPackagePath { + path, + package, + named_address_map, + } in mv_files + { + let (id, interface_contents) = + interface_generator::write_file_to_string(module_to_named_address, &path)?; + let addr_dir = dir_path!(all_addr_dir.clone(), format!("{}", id.address().to_hex())); + let file_path = file_path!(addr_dir.clone(), format!("{}", id.name()), MOVE_EXTENSION); + result.push(IndexedPackagePath { + path: Symbol::from(file_path.clone().into_os_string().into_string().unwrap()), + package, + named_address_map, + }); + // it's possible some files exist but not others due to multithreaded environments + if separate_by_hash && Path::new(&file_path).is_file() { + continue; + } + + std::fs::create_dir_all(&addr_dir)?; + + let mut tmp = NamedTempFile::new_in(addr_dir)?; + tmp.write_all(interface_contents.as_bytes())?; + + // it's possible some files exist but not others due to multithreaded environments + // Check for the file existing and then safely move the tmp file there if + // it does not + if separate_by_hash && Path::new(&file_path).is_file() { + continue; + } + std::fs::rename(tmp.path(), file_path)?; + } + + Ok(result) +} + +fn has_compiled_module_magic_number(path: &str) -> bool { + use move_binary_format::file_format_common::BinaryConstants; + let mut file = match File::open(path) { + Err(_) => return false, + Ok(f) => f, + }; + let mut magic = [0u8; BinaryConstants::MOVE_MAGIC_SIZE]; + let num_bytes_read = match file.read(&mut magic) { + Err(_) => return false, + Ok(n) => n, + }; + num_bytes_read == BinaryConstants::MOVE_MAGIC_SIZE && magic == BinaryConstants::MOVE_MAGIC +} + +//************************************************************************************************** +// Translations +//************************************************************************************************** + +impl PassResult { + pub fn equivalent_pass(&self) -> Pass { + match self { + PassResult::Parser(_) => PASS_PARSER, + PassResult::Expansion(_) => PASS_EXPANSION, + } + } +} + +fn run( + compilation_env: &mut CompilationEnv, + pre_compiled_lib: Option<&FullyCompiledProgram>, + cur: PassResult, + until: Pass, + mut result_check: impl FnMut(&PassResult, &CompilationEnv), +) -> Result { + assert!( + until <= PASS_EXPANSION, + "Invalid pass for run_to. 
Target is greater than maximum pass" + ); + result_check(&cur, compilation_env); + if cur.equivalent_pass() >= until { + return Ok(cur); + } + + match cur { + PassResult::Parser(prog) => { + let prog = parser::merge_spec_modules::program(compilation_env, prog); + let prog = unit_test::filter_test_members::program(compilation_env, prog); + let prog = verification::ast_filter::program(compilation_env, prog); + if compilation_env.flags().debug() { + eprintln!( + "Before expansion: program = {}", + ast_debug::display_verbose(&prog) + ) + }; + let eprog = expansion::translate::program(compilation_env, pre_compiled_lib, prog); + compilation_env.check_diags_at_or_above_severity(Severity::Bug)?; + if compilation_env.flags().debug() { + eprintln!( + "After expansion: program = {}", + ast_debug::display_verbose(&eprog) + ) + }; + run( + compilation_env, + pre_compiled_lib, + PassResult::Expansion(eprog), + until, + result_check, + ) + }, + PassResult::Expansion(_) => { + unreachable!("ICE Pass::Compilation is >= all passes") + }, + } +} diff --git a/third_party/move/move-compiler-v2/legacy-move-compiler/src/command_line/mod.rs b/third_party/move/move-compiler-v2/legacy-move-compiler/src/command_line/mod.rs new file mode 100644 index 0000000000000..66bf85092a0e2 --- /dev/null +++ b/third_party/move/move-compiler-v2/legacy-move-compiler/src/command_line/mod.rs @@ -0,0 +1,74 @@ +// Copyright (c) The Diem Core Contributors +// Copyright (c) The Move Contributors +// SPDX-License-Identifier: Apache-2.0 + +use move_command_line_common::env::read_bool_env_var; + +pub mod compiler; + +pub const DEPENDENCY: &str = "dependency"; +pub const DEPENDENCY_SHORT: char = 'd'; + +pub const SENDER: &str = "sender"; +pub const SENDER_SHORT: char = 's'; + +pub const OUT_DIR: &str = "out-dir"; +pub const OUT_DIR_SHORT: char = 'o'; +pub const DEFAULT_OUTPUT_DIR: &str = "build"; + +pub const SHADOW: &str = "shadow"; +pub const SHADOW_SHORT: char = 'S'; + +pub const SKIP_ATTRIBUTE_CHECKS: &str = "skip-attribute-checks"; + +pub const SOURCE_MAP: &str = "source-map"; +pub const SOURCE_MAP_SHORT: char = 'm'; + +pub const TEST: &str = "test"; +pub const TEST_SHORT: char = 't'; + +pub const VERIFY: &str = "verify"; +pub const VERIFY_SHORT: char = 'v'; + +pub const LANGUAGE_VERSION: &str = "language-version"; + +/// Color flag interpreted locally in diagnostics/mod.rs. +/// (Is translated to codespan_reporting::term::termcolor::ColorChoice). +/// Choices here are `NONE`, `ANSI`, `ALWAYS`, with default to Auto. 
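+/// For example, setting `COLOR_MODE=NONE` in the environment disables colored
+/// output entirely; unrecognized values fall back to automatic detection.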
+pub const COLOR_MODE_ENV_VAR: &str = "COLOR_MODE"; + +pub const MOVE_COMPILED_INTERFACES_DIR: &str = "mv_interfaces"; + +pub const COMPILED_NAMED_ADDRESS_MAPPING: &str = "compiled-module-address-name"; + +// default value for compiler --debug flag (1 or true to set) +// (usually for debugging situations where compiler flags are hard to reach) +pub const MOVE_COMPILER_DEBUG_ENV_VAR: &str = "MOVE_COMPILER_DEBUG"; +pub const MVC_DEBUG_ENV_VAR: &str = "MVC_DEBUG"; + +// Name of compiler CLI debug clap flag (in CLI, looks like --debug): +pub const DEBUG_FLAG: &str = "debug"; + +// default value for boolean --dump-bytecode flag (1 or true to set) +// (usually for debugging situations where compiler flags are hard to reach) +pub const MOVE_COMPILER_DUMP_ENV_VAR: &str = "MOVE_COMPILER_DUMP"; +pub const MVC_DUMP_ENV_VAR: &str = "MVC_DUMP"; + +pub const MOVE_COMPILER_WARN_OF_DEPRECATION_USE: &str = "MOVE_COMPILER_WARN_OF_DEPRECATION_USE"; +pub const MOVE_COMPILER_WARN_OF_DEPRECATION_USE_FLAG: &str = "Wdeprecation"; + +pub const WARN_OF_DEPRECATION_USE_IN_APTOS_LIBS: &str = "WARN_OF_DEPRECATION_USE_IN_APTOS_LIBS"; +pub const WARN_OF_DEPRECATION_USE_IN_APTOS_LIBS_FLAG: &str = "Wdeprecation-aptos"; + +pub const WARN_UNUSED_FLAG: &str = "Wunused"; + +pub const LANG_V2_FLAG: &str = "lang_v2"; + +// Flag to dump a stacktrace on a compiler error, for users who like +// to keep RUST_BACKTRACE always enabled. +pub const MOVE_COMPILER_BACKTRACE_ENV_VAR: &str = "MOVE_COMPILER_BACKTRACE"; +pub const MVC_BACKTRACE_ENV_VAR: &str = "MVC_BACKTRACE"; + +pub fn get_move_compiler_backtrace_from_env() -> bool { + read_bool_env_var(MOVE_COMPILER_BACKTRACE_ENV_VAR) || read_bool_env_var(MVC_BACKTRACE_ENV_VAR) +} diff --git a/third_party/move/move-compiler-v2/legacy-move-compiler/src/compiled_unit.rs b/third_party/move/move-compiler-v2/legacy-move-compiler/src/compiled_unit.rs new file mode 100644 index 0000000000000..dc3565a8118de --- /dev/null +++ b/third_party/move/move-compiler-v2/legacy-move-compiler/src/compiled_unit.rs @@ -0,0 +1,262 @@ +// Copyright (c) The Diem Core Contributors +// Copyright (c) The Move Contributors +// SPDX-License-Identifier: Apache-2.0 + +use crate::{ + diag, + diagnostics::Diagnostics, + expansion::ast::{ModuleIdent, ModuleIdent_, SpecId}, + parser::ast::{ModuleName, Var}, + shared::{Name, NumericalAddress}, +}; +use move_binary_format::file_format as F; +use move_bytecode_source_map::source_map::SourceMap; +use move_core_types::{ + account_address::AccountAddress, identifier::Identifier as MoveCoreIdentifier, + language_storage::ModuleId, +}; +use move_ir_types::location::*; +use move_symbol_pool::Symbol; +use std::collections::BTreeMap; + +//************************************************************************************************** +// Compiled Unit +//************************************************************************************************** + +#[derive(Debug, Clone)] +pub struct SpecInfo { + pub offset: F::CodeOffset, + // Re-mapped function pointer names + pub used_lambda_funs: BTreeMap)>, +} + +#[derive(Debug, Clone)] +pub struct FunctionInfo { + pub spec_info: BTreeMap, +} + +#[derive(Debug, Clone)] +pub struct NamedCompiledModule { + // package name metadata from compiler arguments + pub package_name: Option, + pub address: NumericalAddress, + pub name: Symbol, + pub module: F::CompiledModule, + pub source_map: SourceMap, +} + +#[derive(Debug, Clone)] +pub struct NamedCompiledScript { + // package name metadata from compiler arguments + pub package_name: Option, + pub 
name: Symbol, + pub script: F::CompiledScript, + pub source_map: SourceMap, +} + +#[derive(Debug, Clone)] +pub struct AnnotatedCompiledModule { + pub loc: Loc, + pub module_name_loc: Loc, + pub address_name: Option, + pub named_module: NamedCompiledModule, +} + +#[derive(Debug, Clone)] +pub struct AnnotatedCompiledScript { + pub loc: Loc, + pub named_script: NamedCompiledScript, + pub function_info: FunctionInfo, +} + +pub trait TargetModule {} +pub trait TargetScript {} +impl TargetScript for AnnotatedCompiledScript {} +impl TargetScript for NamedCompiledScript {} +impl TargetModule for AnnotatedCompiledModule {} +impl TargetModule for NamedCompiledModule {} + +#[derive(Debug, Clone)] +pub enum CompiledUnitEnum { + Module(TModule), + Script(TScript), +} + +pub type CompiledUnit = CompiledUnitEnum; +pub type AnnotatedCompiledUnit = CompiledUnitEnum; + +impl AnnotatedCompiledModule { + pub fn module_ident(&self) -> ModuleIdent { + use crate::expansion::ast::Address; + let address = + Address::Numerical(self.address_name, sp(self.loc, self.named_module.address)); + sp( + self.loc, + ModuleIdent_::new( + address, + ModuleName(sp(self.module_name_loc, self.named_module.name)), + ), + ) + } + + pub fn module_id(&self) -> (Option, ModuleId) { + let id = ModuleId::new( + AccountAddress::new(self.named_module.address.into_bytes()), + MoveCoreIdentifier::new(self.named_module.name.to_string()).unwrap(), + ); + (self.address_name, id) + } +} + +impl AnnotatedCompiledUnit { + pub fn verify(&self) -> Diagnostics { + match self { + Self::Module(AnnotatedCompiledModule { + loc, + named_module: + NamedCompiledModule { + module, source_map, .. + }, + .. + }) => verify_module(source_map, *loc, module), + Self::Script(AnnotatedCompiledScript { + loc, + named_script: + NamedCompiledScript { + script, source_map, .. + }, + .. + }) => verify_script(source_map, *loc, script), + } + } + + pub fn into_compiled_unit(self) -> CompiledUnit { + match self { + Self::Module(AnnotatedCompiledModule { + named_module: module, + .. + }) => CompiledUnitEnum::Module(module), + Self::Script(AnnotatedCompiledScript { + named_script: script, + .. + }) => CompiledUnitEnum::Script(script), + } + } + + pub fn package_name(&self) -> Option { + match self { + Self::Module(AnnotatedCompiledModule { named_module, .. }) => named_module.package_name, + Self::Script(AnnotatedCompiledScript { named_script, .. }) => named_script.package_name, + } + } + + pub fn loc(&self) -> &Loc { + match self { + Self::Module(AnnotatedCompiledModule { loc, .. }) + | Self::Script(AnnotatedCompiledScript { loc, .. }) => loc, + } + } +} + +impl CompiledUnit { + pub fn name(&self) -> Symbol { + match self { + Self::Module(NamedCompiledModule { name, .. }) + | Self::Script(NamedCompiledScript { name, .. }) => *name, + } + } + + pub fn package_name(&self) -> Option { + match self { + Self::Module(NamedCompiledModule { package_name, .. }) + | Self::Script(NamedCompiledScript { package_name, .. }) => *package_name, + } + } + + pub fn source_map(&self) -> &SourceMap { + match self { + Self::Module(NamedCompiledModule { source_map, .. }) + | Self::Script(NamedCompiledScript { source_map, .. }) => source_map, + } + } + + pub fn serialize(&self, bytecode_version: Option) -> Vec { + let mut serialized = Vec::::new(); + match self { + Self::Module(NamedCompiledModule { module, .. }) => module + .serialize_for_version(bytecode_version, &mut serialized) + .unwrap(), + Self::Script(NamedCompiledScript { script, .. 
}) => script + .serialize_for_version(bytecode_version, &mut serialized) + .unwrap(), + }; + serialized + } + + #[allow(dead_code)] + pub fn serialize_debug(self) -> Vec { + match self { + Self::Module(NamedCompiledModule { module, .. }) => format!("{:?}", module), + Self::Script(NamedCompiledScript { script, .. }) => format!("{:?}", script), + } + .into() + } + + pub fn serialize_source_map(&self) -> Vec { + match self { + Self::Module(NamedCompiledModule { source_map, .. }) => { + bcs::to_bytes(source_map).unwrap() + }, + Self::Script(NamedCompiledScript { source_map, .. }) => { + bcs::to_bytes(source_map).unwrap() + }, + } + } +} + +fn bytecode_verifier_mismatch_bug( + sm: &SourceMap, + loc: Loc, + location: move_binary_format::errors::Location, + e: move_binary_format::errors::VMError, +) -> Diagnostics { + let loc = match e.offsets().first() { + Some((fdef_idx, offset)) if &location == e.location() => { + sm.get_code_location(*fdef_idx, *offset).unwrap_or(loc) + }, + _ => loc, + }; + Diagnostics::from(vec![diag!( + Bug::BytecodeVerification, + (loc, format!("ICE failed bytecode verifier: {:#?}", e)), + )]) +} + +fn verify_module(sm: &SourceMap, loc: Loc, cm: &F::CompiledModule) -> Diagnostics { + match move_bytecode_verifier::verifier::verify_module(cm) { + Ok(_) => Diagnostics::new(), + Err(e) => bytecode_verifier_mismatch_bug( + sm, + loc, + move_binary_format::errors::Location::Module(cm.self_id()), + e, + ), + } +} + +fn verify_script(sm: &SourceMap, loc: Loc, cs: &F::CompiledScript) -> Diagnostics { + match move_bytecode_verifier::verifier::verify_script(cs) { + Ok(_) => Diagnostics::new(), + Err(e) => { + bytecode_verifier_mismatch_bug(sm, loc, move_binary_format::errors::Location::Script, e) + }, + } +} + +pub fn verify_units<'a>(units: impl IntoIterator) -> Diagnostics { + let mut diags = Diagnostics::new(); + for unit in units { + diags.extend(unit.verify()); + } + diags +} diff --git a/third_party/move/move-compiler-v2/legacy-move-compiler/src/diagnostics/codes.rs b/third_party/move/move-compiler-v2/legacy-move-compiler/src/diagnostics/codes.rs new file mode 100644 index 0000000000000..3b4b7bdee649d --- /dev/null +++ b/third_party/move/move-compiler-v2/legacy-move-compiler/src/diagnostics/codes.rs @@ -0,0 +1,360 @@ +// Copyright (c) The Diem Core Contributors +// Copyright (c) The Move Contributors +// SPDX-License-Identifier: Apache-2.0 + +//************************************************************************************************** +// Main types +//************************************************************************************************** + +#[derive(PartialEq, Eq, Clone, Copy, Debug, Hash, PartialOrd, Ord)] +pub enum Severity { + Warning = 0, + NonblockingError = 1, + BlockingError = 2, + Bug = 3, +} + +#[derive(PartialEq, Eq, Clone, Debug, Hash)] +pub struct DiagnosticInfo { + severity: Severity, + category: Category, + code: u8, + message: &'static str, +} + +pub trait DiagnosticCode: Copy { + const CATEGORY: Category; + + fn severity(self) -> Severity; + + fn code_and_message(self) -> (u8, &'static str); + + fn into_info(self) -> DiagnosticInfo { + let severity = self.severity(); + let category = Self::CATEGORY; + let (code, message) = self.code_and_message(); + DiagnosticInfo { + severity, + category, + code, + message, + } + } +} + +//************************************************************************************************** +// Categories and Codes 
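+// Rendered codes combine a severity prefix, a two-digit category number, and a
+// three-digit code (see `DiagnosticInfo::render` below); for instance,
+// `Syntax::UnexpectedToken` renders as "E01002".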
+//************************************************************************************************** + +macro_rules! codes { + ($($cat:ident: [ + $($code:ident: { msg: $code_msg:literal, severity:$sev:ident $(,)? }),* $(,)? + ]),* $(,)?) => { + #[derive(PartialEq, Eq, Clone, Copy, Debug, Hash)] + #[repr(u8)] + pub enum Category { + $($cat,)* + } + + $( + #[derive(PartialEq, Eq, Clone, Copy, Debug, Hash)] + #[repr(u8)] + pub enum $cat { + DontStartAtZeroPlaceholder, + $($code,)* + } + + impl DiagnosticCode for $cat { + const CATEGORY: Category = { + // hacky check that $cat_num <= 99 + let cat_is_leq_99 = (Category::$cat as u8) <= 99; + ["Diagnostic Category must be a u8 <= 99"][!cat_is_leq_99 as usize]; + Category::$cat + }; + + fn severity(self) -> Severity { + match self { + Self::DontStartAtZeroPlaceholder => + panic!("ICE do not use placeholder error code"), + $(Self::$code => Severity::$sev,)* + } + } + + fn code_and_message(self) -> (u8, &'static str) { + let code = self as u8; + debug_assert!(code > 0); + match self { + Self::DontStartAtZeroPlaceholder => + panic!("ICE do not use placeholder error code"), + $(Self::$code => (code, $code_msg),)* + } + } + } + )* + + }; +} + +codes!( + // bucket for random one off errors. unlikely to be used + Uncategorized: [ + DeprecatedWillBeRemoved: { msg: "DEPRECATED. will be removed", severity: Warning }, + ], + // syntax errors + Syntax: [ + InvalidCharacter: { msg: "invalid character", severity: NonblockingError }, + UnexpectedToken: { msg: "unexpected token", severity: NonblockingError }, + InvalidModifier: { msg: "invalid modifier", severity: NonblockingError }, + InvalidDocComment: { msg: "invalid documentation comment", severity: Warning }, + InvalidAddress: { msg: "invalid address", severity: NonblockingError }, + InvalidNumber: { msg: "invalid number literal", severity: NonblockingError }, + InvalidByteString: { msg: "invalid byte string", severity: NonblockingError }, + InvalidHexString: { msg: "invalid hex string", severity: NonblockingError }, + InvalidLValue: { msg: "invalid assignment", severity: NonblockingError }, + SpecContextRestricted: + { msg: "syntax item restricted to spec contexts", severity: BlockingError }, + InvalidSpecBlockMember: { msg: "invalid spec block member", severity: NonblockingError }, + InvalidAccessSpecifier: { msg: "invalid access specifier", severity: NonblockingError }, + UnsupportedLanguageItem: { msg: "unsupported language construct", severity: BlockingError }, + InvalidVariantAccess: { msg: "invalid variant name", severity: BlockingError }, + ], + // errors for any rules around declaration items + Declarations: [ + DuplicateItem: + { msg: "duplicate declaration, item, or annotation", severity: NonblockingError }, + UnnecessaryItem: { msg: "unnecessary or extraneous item", severity: NonblockingError }, + InvalidAddress: { msg: "invalid 'address' declaration", severity: NonblockingError }, + InvalidModule: { msg: "invalid 'module' declaration", severity: NonblockingError }, + InvalidScript: { msg: "invalid 'script' declaration", severity: NonblockingError }, + InvalidConstant: { msg: "invalid 'const' declaration", severity: NonblockingError }, + InvalidFunction: { msg: "invalid 'fun' declaration", severity: NonblockingError }, + InvalidStruct: { msg: "invalid 'struct' declaration", severity: NonblockingError }, + InvalidSpec: { msg: "invalid 'spec' declaration", severity: NonblockingError }, + InvalidName: { msg: "invalid name", severity: BlockingError }, + InvalidFriendDeclaration: + { msg: "invalid 
'friend' declaration", severity: NonblockingError }, + InvalidAcquiresItem: { msg: "invalid 'acquires' item", severity: NonblockingError }, + InvalidPhantomUse: + { msg: "invalid phantom type parameter usage", severity: NonblockingError }, + InvalidNonPhantomUse: + { msg: "invalid non-phantom type parameter usage", severity: Warning }, + InvalidAttribute: { msg: "invalid attribute", severity: NonblockingError }, + // TODO(https://github.com/aptos-labs/aptos-core/issues/9411) turn into NonblockingError when safe to do so. + UnknownAttribute: { msg: "unknown attribute", severity: Warning }, + ], + // errors name resolution, mostly expansion/translate and naming/translate + NameResolution: [ + AddressWithoutValue: { msg: "address with no value", severity: NonblockingError }, + UnboundModule: { msg: "unbound module", severity: BlockingError }, + UnboundModuleMember: { msg: "unbound module member", severity: BlockingError }, + UnboundType: { msg: "unbound type", severity: BlockingError }, + UnboundUnscopedName: { msg: "unbound unscoped name", severity: BlockingError }, + NamePositionMismatch: { msg: "unexpected name in this position", severity: BlockingError }, + TooManyTypeArguments: { msg: "too many type arguments", severity: NonblockingError }, + TooFewTypeArguments: { msg: "too few type arguments", severity: BlockingError }, + UnboundVariable: { msg: "unbound variable", severity: BlockingError }, + UnboundField: { msg: "unbound field", severity: BlockingError }, + ReservedName: { msg: "invalid use of reserved name", severity: BlockingError }, + + DeprecatedAddressBlock: { msg: "Use of deprecated address block", severity: Warning }, + DeprecatedModule: { msg: "Use of deprecated module", severity: Warning }, + DeprecatedMember: { msg: "Use of deprecated member", severity: Warning }, + DeprecatedStruct: { msg: "Use of deprecated struct", severity: Warning }, + DeprecatedFunction: { msg: "Use of deprecated function", severity: Warning }, + DeprecatedConstant: { msg: "Use of deprecated constant", severity: Warning }, + ], + // errors for typing rules. mostly typing/translate + TypeSafety: [ + Visibility: { msg: "restricted visibility", severity: BlockingError }, + ScriptContext: { msg: "requires script context", severity: NonblockingError }, + BuiltinOperation: { msg: "built-in operation not supported", severity: BlockingError }, + ExpectedBaseType: { msg: "expected a single non-reference type", severity: BlockingError }, + ExpectedSingleType: { msg: "expected a single type", severity: BlockingError }, + SubtypeError: { msg: "invalid subtype", severity: BlockingError }, + JoinError: { msg: "incompatible types", severity: BlockingError }, + RecursiveType: { msg: "invalid type. 
recursive type found", severity: BlockingError }, + ExpectedSpecificType: { msg: "expected specific type", severity: BlockingError }, + UninferredType: { msg: "cannot infer type", severity: BlockingError }, + ScriptSignature: { msg: "invalid script signature", severity: NonblockingError }, + TypeForConstant: { msg: "invalid type for constant", severity: BlockingError }, + UnsupportedConstant: + { msg: "invalid statement or expression in constant", severity: BlockingError }, + InvalidLoopControl: { msg: "invalid loop control", severity: BlockingError }, + InvalidNativeUsage: { msg: "invalid use of native item", severity: BlockingError }, + TooFewArguments: { msg: "too few arguments", severity: BlockingError }, + TooManyArguments: { msg: "too many arguments", severity: NonblockingError }, + CyclicData: { msg: "cyclic data", severity: NonblockingError }, + CyclicInstantiation: + { msg: "cyclic type instantiation", severity: NonblockingError }, + MissingAcquires: { msg: "missing acquires annotation", severity: NonblockingError }, + InvalidNum: { msg: "invalid number after type inference", severity: NonblockingError }, + NonInvocablePublicScript: { + msg: "script function cannot be invoked with this signature \ + (NOTE: this may become an error in the future)", + severity: Warning + }, + InvalidCallTarget: { msg: "invalid call target", severity: BlockingError }, + InvalidFunctionType: { msg: "invalid usage of function type", severity: BlockingError }, + ], + // errors for ability rules. mostly typing/translate + AbilitySafety: [ + Constraint: { msg: "ability constraint not satisfied", severity: NonblockingError }, + ImplicitlyCopyable: { msg: "type not implicitly copyable", severity: NonblockingError }, + ], + // errors for move rules. mostly cfgir/locals + MoveSafety: [ + UnusedUndroppable: { msg: "unused value without 'drop'", severity: NonblockingError }, + UnassignedVariable: { msg: "use of unassigned variable", severity: NonblockingError }, + ], + // errors for move rules. 
mostly cfgir/borrows + ReferenceSafety: [ + RefTrans: { msg: "referential transparency violated", severity: BlockingError }, + MutOwns: { msg: "mutable ownership violated", severity: NonblockingError }, + Dangling: { + msg: "invalid operation, could create dangling a reference", + severity: NonblockingError, + }, + InvalidReturn: + { msg: "invalid return of locally borrowed state", severity: NonblockingError }, + InvalidTransfer: { msg: "invalid transfer of references", severity: NonblockingError }, + AmbiguousVariableUsage: { msg: "ambiguous usage of variable", severity: NonblockingError }, + ], + BytecodeGeneration: [ + UnfoldableConstant: { msg: "cannot compute constant value", severity: NonblockingError }, + ], + // errors for any unused code or items + UnusedItem: [ + Alias: { msg: "unused alias", severity: Warning }, + Variable: { msg: "unused variable", severity: Warning }, + Assignment: { msg: "unused assignment", severity: Warning }, + TrailingSemi: { msg: "unnecessary trailing semicolon", severity: Warning }, + DeadCode: { msg: "dead or unreachable code", severity: Warning }, + StructTypeParam: { msg: "unused struct type parameter", severity: Warning }, + Attribute: { msg: "unused attribute", severity: Warning }, + ], + Attributes: [ + Duplicate: { msg: "invalid duplicate attribute", severity: NonblockingError }, + InvalidName: { msg: "invalid attribute name", severity: NonblockingError }, + InvalidValue: { msg: "invalid attribute value", severity: NonblockingError }, + InvalidUsage: { msg: "invalid usage of known attribute", severity: NonblockingError }, + InvalidTest: { msg: "unable to generate test", severity: NonblockingError }, + InvalidBytecodeInst: + { msg: "unknown bytecode instruction function", severity: NonblockingError }, + ValueWarning: { msg: "potential issue with attribute value", severity: Warning } + ], + Tests: [ + TestFailed: { msg: "test failure", severity: BlockingError }, + ], + Bug: [ + BytecodeGeneration: { msg: "BYTECODE GENERATION FAILED", severity: Bug }, + BytecodeVerification: { msg: "BYTECODE VERIFICATION FAILED", severity: Bug }, + Unimplemented: { msg: "Not yet implemented", severity: BlockingError }, + ], + Derivation: [ + DeriveFailed: { msg: "attribute derivation failed", severity: BlockingError } + ], + // errors for inlining + Inlining: [ + Recursion: { msg: "recursion during function inlining not allowed", severity: BlockingError }, + AfterExpansion: { msg: "Inlined code invalid in this context", severity: BlockingError }, + Unsupported: { msg: "feature not supported in inlined functions", severity: BlockingError }, + UnexpectedLambda: { msg: "lambda parameter only permitted as parameter to inlined function", severity: BlockingError }, + ], +); + +//************************************************************************************************** +// impls +//************************************************************************************************** + +impl DiagnosticInfo { + pub fn render(self) -> (/* code */ String, /* message */ &'static str) { + let Self { + severity, + category, + code, + message, + } = self; + let sev_prefix = match severity { + Severity::BlockingError | Severity::NonblockingError => "E", + Severity::Warning => "W", + Severity::Bug => "ICE", + }; + let cat_prefix: u8 = category as u8; + debug_assert!(cat_prefix <= 99); + let string_code = format!("{}{:02}{:03}", sev_prefix, cat_prefix, code); + (string_code, message) + } + + pub fn message(&self) -> &'static str { + self.message + } + + pub fn severity(&self) -> 
Severity { + self.severity + } +} + +impl Severity { + pub const MAX: Self = Self::Bug; + pub const MIN: Self = Self::Warning; + + pub fn into_codespan_severity(self) -> codespan_reporting::diagnostic::Severity { + use codespan_reporting::diagnostic::Severity as CSRSeverity; + match self { + Severity::Bug => CSRSeverity::Bug, + Severity::BlockingError | Severity::NonblockingError => CSRSeverity::Error, + Severity::Warning => CSRSeverity::Warning, + } + } +} + +impl Default for Severity { + fn default() -> Self { + Self::MIN + } +} + +#[derive(Clone, Copy)] +pub enum DeprecatedItem { + Module, + Member, + Struct, + Function, + Constant, + AddressBlock, +} + +impl DeprecatedItem { + pub fn get_string(&self) -> &'static str { + match self { + DeprecatedItem::Module => "module", + DeprecatedItem::Member => "member", + DeprecatedItem::Struct => "struct", + DeprecatedItem::Function => "function", + DeprecatedItem::Constant => "constant", + DeprecatedItem::AddressBlock => "address block", + } + } + + pub fn get_capitalized_string(&self) -> &'static str { + match self { + DeprecatedItem::Module => "Module", + DeprecatedItem::Member => "Member", + DeprecatedItem::Struct => "Struct", + DeprecatedItem::Function => "Function", + DeprecatedItem::Constant => "Constant", + DeprecatedItem::AddressBlock => "Address block", + } + } + + pub fn get_code(&self) -> impl DiagnosticCode { + match self { + DeprecatedItem::Module => NameResolution::DeprecatedModule, + DeprecatedItem::Member => NameResolution::DeprecatedMember, + DeprecatedItem::Struct => NameResolution::DeprecatedStruct, + DeprecatedItem::Function => NameResolution::DeprecatedFunction, + DeprecatedItem::Constant => NameResolution::DeprecatedConstant, + DeprecatedItem::AddressBlock => NameResolution::DeprecatedAddressBlock, + } + } +} diff --git a/third_party/move/move-compiler-v2/legacy-move-compiler/src/diagnostics/mod.rs b/third_party/move/move-compiler-v2/legacy-move-compiler/src/diagnostics/mod.rs new file mode 100644 index 0000000000000..290d60ccdd40b --- /dev/null +++ b/third_party/move/move-compiler-v2/legacy-move-compiler/src/diagnostics/mod.rs @@ -0,0 +1,409 @@ +// Copyright (c) The Diem Core Contributors +// Copyright (c) The Move Contributors +// SPDX-License-Identifier: Apache-2.0 + +pub mod codes; + +use crate::{ + command_line::COLOR_MODE_ENV_VAR, + diagnostics::codes::{DiagnosticCode, DiagnosticInfo, Severity}, +}; +use codespan_reporting::{ + self as csr, + files::SimpleFiles, + term::{ + emit, + termcolor::{Buffer, ColorChoice, StandardStream, WriteColor}, + Config, + }, +}; +use move_command_line_common::{ + env::{read_bool_env_var, read_env_var}, + files::FileHash, +}; +use move_ir_types::location::*; +use move_symbol_pool::Symbol; +use once_cell::sync::Lazy; +use std::{ + backtrace::{Backtrace, BacktraceStatus}, + collections::{BTreeMap, HashMap, HashSet}, + iter::FromIterator, + ops::Range, +}; + +//************************************************************************************************** +// Types +//************************************************************************************************** + +pub type FileId = usize; +pub type FileName = Symbol; + +pub type FilesSourceText = HashMap; +type FileMapping = HashMap; + +#[derive(PartialEq, Eq, Clone, Debug, Hash)] +#[must_use] +pub struct Diagnostic { + info: DiagnosticInfo, + primary_label: (Loc, String), + secondary_labels: Vec<(Loc, String)>, + notes: Vec, +} + +#[derive(PartialEq, Eq, Hash, Clone, Debug, Default)] +pub struct Diagnostics { + diagnostics: Vec, + 
severity_count: BTreeMap, +} + +//************************************************************************************************** +// Reporting +//************************************************************************************************** + +pub fn report_diagnostics(files: &FilesSourceText, diags: Diagnostics) -> ! { + let should_exit = true; + report_diagnostics_impl(files, diags, should_exit); + unreachable!() +} + +// report diagnostics, but do not exit if diags are all warnings +pub fn report_diagnostics_exit_on_error(files: &FilesSourceText, diags: Diagnostics) { + let should_exit = diags + .diagnostics + .iter() + .any(|diag| diag.info.severity() > Severity::Warning); + report_diagnostics_impl(files, diags, should_exit); +} + +pub fn report_warnings(files: &FilesSourceText, warnings: Diagnostics) { + if warnings.is_empty() { + return; + } + debug_assert!(warnings.max_severity().unwrap() == Severity::Warning); + report_diagnostics_impl(files, warnings, false) +} + +fn report_diagnostics_impl(files: &FilesSourceText, diags: Diagnostics, should_exit: bool) { + let color_choice = match read_env_var(COLOR_MODE_ENV_VAR).as_str() { + "NONE" => ColorChoice::Never, + "ANSI" => ColorChoice::AlwaysAnsi, + "ALWAYS" => ColorChoice::Always, + _ => ColorChoice::Auto, + }; + let mut writer = StandardStream::stderr(color_choice); + output_diagnostics(&mut writer, files, diags); + if should_exit { + std::process::exit(1); + } +} + +pub fn unwrap_or_report_diagnostics(files: &FilesSourceText, res: Result) -> T { + match res { + Ok(t) => t, + Err(diags) => { + assert!(!diags.is_empty()); + report_diagnostics(files, diags) + }, + } +} + +pub fn report_diagnostics_to_buffer(files: &FilesSourceText, diags: Diagnostics) -> Vec { + let mut writer = Buffer::no_color(); + output_diagnostics(&mut writer, files, diags); + writer.into_inner() +} + +pub fn report_diagnostics_to_color_buffer(files: &FilesSourceText, diags: Diagnostics) -> Vec { + let mut writer = Buffer::ansi(); + output_diagnostics(&mut writer, files, diags); + writer.into_inner() +} + +fn output_diagnostics( + writer: &mut W, + sources: &FilesSourceText, + diags: Diagnostics, +) { + let mut files = SimpleFiles::new(); + let mut file_mapping = HashMap::new(); + for (fhash, (fname, source)) in sources { + let id = files.add(*fname, source.as_str()); + file_mapping.insert(*fhash, id); + } + render_diagnostics(writer, &files, &file_mapping, diags); +} + +fn render_diagnostics( + writer: &mut dyn WriteColor, + files: &SimpleFiles, + file_mapping: &FileMapping, + mut diags: Diagnostics, +) { + diags.diagnostics.sort_by(|e1, e2| { + let loc1: &Loc = &e1.primary_label.0; + let loc2: &Loc = &e2.primary_label.0; + loc1.cmp(loc2) + }); + let mut seen: HashSet = HashSet::new(); + for diag in diags.diagnostics { + if seen.contains(&diag) { + continue; + } + seen.insert(diag.clone()); + let rendered = render_diagnostic(file_mapping, diag); + emit(writer, &Config::default(), files, &rendered).unwrap() + } +} + +fn convert_loc(file_mapping: &FileMapping, loc: Loc) -> (FileId, Range) { + let fname = loc.file_hash(); + let id = *file_mapping.get(&fname).unwrap(); + let range = loc.usize_range(); + (id, range) +} + +fn render_diagnostic( + file_mapping: &FileMapping, + diag: Diagnostic, +) -> csr::diagnostic::Diagnostic { + use csr::diagnostic::{Label, LabelStyle}; + let mk_lbl = |style: LabelStyle, msg: (Loc, String)| -> Label { + let (id, range) = convert_loc(file_mapping, msg.0); + csr::diagnostic::Label::new(style, id, range).with_message(msg.1) + }; 
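+    // Translate our diagnostic into a codespan-reporting diagnostic: the primary label
+    // carries the main source location and message, while secondary labels and notes
+    // become additional annotations.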
+ let Diagnostic { + info, + primary_label, + secondary_labels, + notes, + } = diag; + let mut diag = csr::diagnostic::Diagnostic::new(info.severity().into_codespan_severity()); + let (code, message) = info.render(); + diag = diag.with_code(code); + diag = diag.with_message(message); + diag = diag.with_labels(vec![mk_lbl(LabelStyle::Primary, primary_label)]); + diag = diag.with_labels( + secondary_labels + .into_iter() + .map(|msg| mk_lbl(LabelStyle::Secondary, msg)) + .collect(), + ); + diag = diag.with_notes(notes); + diag +} + +//************************************************************************************************** +// impls +//************************************************************************************************** + +impl Diagnostics { + pub fn new() -> Self { + Self { + diagnostics: vec![], + severity_count: BTreeMap::new(), + } + } + + pub fn max_severity(&self) -> Option { + debug_assert!(self.severity_count.values().all(|count| *count > 0)); + self.severity_count + .iter() + .max_by_key(|(sev, _count)| **sev) + .map(|(sev, _count)| *sev) + } + + pub fn is_empty(&self) -> bool { + self.diagnostics.is_empty() + } + + pub fn len(&self) -> usize { + self.diagnostics.len() + } + + pub fn add(&mut self, diag: Diagnostic) { + *self.severity_count.entry(diag.info.severity()).or_insert(0) += 1; + self.diagnostics.push(diag) + } + + pub fn add_opt(&mut self, diag_opt: Option) { + if let Some(diag) = diag_opt { + self.add(diag) + } + } + + pub fn extend(&mut self, other: Self) { + let Self { + diagnostics, + severity_count, + } = other; + for (sev, count) in severity_count { + *self.severity_count.entry(sev).or_insert(0) += count; + } + self.diagnostics.extend(diagnostics) + } + + pub fn into_vec(self) -> Vec { + self.diagnostics + } + + pub fn into_codespan_format( + self, + ) -> Vec<( + codespan_reporting::diagnostic::Severity, + &'static str, + (Loc, String), + Vec<(Loc, String)>, + Vec, + )> { + let mut v = vec![]; + for diag in self.into_vec() { + let Diagnostic { + info, + primary_label, + secondary_labels, + notes, + } = diag; + let csr_diag = ( + info.severity().into_codespan_severity(), + info.message(), + primary_label, + secondary_labels, + notes, + ); + v.push(csr_diag) + } + v + } +} + +impl Diagnostic { + pub fn new( + code: impl DiagnosticCode, + (loc, label): (Loc, impl ToString), + secondary_labels: impl IntoIterator, + notes: impl IntoIterator, + ) -> Self { + let info = code.into_info(); + let label = Diagnostic::add_backtrace( + &label.to_string(), + false, /*info.severity() == Severity::Bug*/ + ); + Diagnostic { + info, + primary_label: (loc, label.to_string()), + secondary_labels: secondary_labels + .into_iter() + .map(|(loc, msg)| (loc, msg.to_string())) + .collect(), + notes: notes.into_iter().map(|msg| msg.to_string()).collect(), + } + } + + fn add_backtrace(msg: &str, _is_bug: bool) -> String { + // Note that you need both MOVE_COMPILER_BACKTRACE=1 and RUST_BACKTRACE=1 for this to + // actually generate a backtrace. 
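+        // For example: `MOVE_COMPILER_BACKTRACE=1 RUST_BACKTRACE=1` (or equivalently
+        // `MVC_BACKTRACE=1 RUST_BACKTRACE=1`) when invoking the compiler.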
+ static DUMP_BACKTRACE: Lazy = Lazy::new(|| { + read_bool_env_var(crate::command_line::MOVE_COMPILER_BACKTRACE_ENV_VAR) + | read_bool_env_var(crate::command_line::MVC_BACKTRACE_ENV_VAR) + }); + if *DUMP_BACKTRACE { + let bt = Backtrace::capture(); + if BacktraceStatus::Captured == bt.status() { + format!("{}\nBacktrace: {:#?}", msg, bt) + } else { + msg.to_owned() + } + } else { + msg.to_owned() + } + } + + pub fn set_code(mut self, code: impl DiagnosticCode) -> Self { + self.info = code.into_info(); + self + } + + #[allow(unused)] + pub fn add_secondary_labels( + &mut self, + additional_labels: impl IntoIterator, + ) { + self.secondary_labels.extend( + additional_labels + .into_iter() + .map(|(loc, msg)| (loc, msg.to_string())), + ) + } + + pub fn add_secondary_label(&mut self, (loc, msg): (Loc, impl ToString)) { + self.secondary_labels.push((loc, msg.to_string())) + } + + pub fn extra_labels_len(&self) -> usize { + self.secondary_labels.len() + self.notes.len() + } + + #[allow(unused)] + pub fn add_notes(&mut self, additional_notes: impl IntoIterator) { + self.notes + .extend(additional_notes.into_iter().map(|msg| msg.to_string())) + } + + pub fn add_note(&mut self, msg: impl ToString) { + self.notes.push(msg.to_string()) + } +} + +#[macro_export] +macro_rules! diag { + ($code: expr, $primary: expr $(,)?) => {{ + #[allow(unused)] + use $crate::diagnostics::codes::*; + $crate::diagnostics::Diagnostic::new( + $code, + $primary, + std::iter::empty::<(move_ir_types::location::Loc, String)>(), + std::iter::empty::(), + ) + }}; + ($code: expr, $primary: expr, $($secondary: expr),+ $(,)?) => {{ + #[allow(unused)] + use $crate::diagnostics::codes::*; + $crate::diagnostics::Diagnostic::new( + $code, + $primary, + vec![$($secondary, )*], + std::iter::empty::(), + ) + }}; +} + +//************************************************************************************************** +// traits +//************************************************************************************************** + +impl FromIterator for Diagnostics { + fn from_iter>(iter: I) -> Self { + let diagnostics = iter.into_iter().collect::>(); + Self::from(diagnostics) + } +} + +impl From> for Diagnostics { + fn from(diagnostics: Vec) -> Self { + let mut severity_count = BTreeMap::new(); + for diag in &diagnostics { + *severity_count.entry(diag.info.severity()).or_insert(0) += 1; + } + Self { + diagnostics, + severity_count, + } + } +} + +impl From> for Diagnostics { + fn from(diagnostic_opt: Option) -> Self { + Diagnostics::from(diagnostic_opt.map_or_else(Vec::new, |diag| vec![diag])) + } +} diff --git a/third_party/move/move-compiler-v2/legacy-move-compiler/src/expansion/aliases.rs b/third_party/move/move-compiler-v2/legacy-move-compiler/src/expansion/aliases.rs new file mode 100644 index 0000000000000..61e35df301d42 --- /dev/null +++ b/third_party/move/move-compiler-v2/legacy-move-compiler/src/expansion/aliases.rs @@ -0,0 +1,275 @@ +// Copyright (c) The Diem Core Contributors +// Copyright (c) The Move Contributors +// SPDX-License-Identifier: Apache-2.0 + +use crate::{ + expansion::ast::{ModuleIdent, ModuleIdent_}, + parser::ast::ModuleName, + shared::{unique_map::UniqueMap, unique_set::UniqueSet, *}, +}; +use move_ir_types::location::*; + +type ScopeDepth = usize; + +#[derive(Clone, Debug)] +pub struct AliasSet { + pub modules: UniqueSet, + pub members: UniqueSet, +} + +#[derive(Clone, Debug)] +pub struct AliasMapBuilder { + modules: UniqueMap, + members: UniqueMap, +} + +#[derive(Clone, Debug)] +pub struct AliasMap { + 
modules: UniqueMap, ModuleIdent)>, + members: UniqueMap, (ModuleIdent, Name))>, + // essentially a mapping from ScopeDepth => AliasSet, which are the unused aliases at that depth + unused: Vec, +} + +pub struct OldAliasMap(Option); + +impl AliasSet { + pub fn new() -> Self { + Self { + modules: UniqueSet::new(), + members: UniqueSet::new(), + } + } + + #[allow(unused)] + pub fn is_empty(&self) -> bool { + let Self { modules, members } = self; + modules.is_empty() && members.is_empty() + } +} + +impl AliasMapBuilder { + pub fn new() -> Self { + Self { + modules: UniqueMap::new(), + members: UniqueMap::new(), + } + } + + pub fn is_empty(&self) -> bool { + let Self { modules, members } = self; + modules.is_empty() && members.is_empty() + } + + fn remove_module_alias_(&mut self, alias: &Name) -> Result<(), Loc> { + let loc = self.modules.get_loc(alias).cloned(); + match self.modules.remove(alias) { + None => Ok(()), + Some(_) => Err(loc.unwrap()), + } + } + + fn remove_member_alias_(&mut self, alias: &Name) -> Result<(), Loc> { + let loc = self.members.get_loc(alias).cloned(); + match self.members.remove(alias) { + None => Ok(()), + Some(_) => Err(loc.unwrap()), + } + } + + /// Adds a module alias to the map. + /// Errors if one already bound for that alias + pub fn add_module_alias(&mut self, alias: Name, ident: ModuleIdent) -> Result<(), Loc> { + let result = self.remove_module_alias_(&alias); + self.modules + .add(alias, (ident, /* is_implicit */ false)) + .unwrap(); + result + } + + /// Adds a member alias to the map. + /// Errors if one already bound for that alias + pub fn add_member_alias( + &mut self, + alias: Name, + ident: ModuleIdent, + member: Name, + ) -> Result<(), Loc> { + let result = self.remove_member_alias_(&alias); + self.members + .add(alias, ((ident, member), /* is_implicit */ false)) + .unwrap(); + result + } + + /// Same as `add_module_alias` but it does not update the scope, and as such it will not be + /// reported as unused + pub fn add_implicit_module_alias( + &mut self, + alias: Name, + ident: ModuleIdent, + ) -> Result<(), Loc> { + let result = self.remove_module_alias_(&alias); + self.modules + .add(alias, (ident, /* is_implicit */ true)) + .unwrap(); + result + } + + /// Same as `add_member_alias` but it does not update the scope, and as such it will not be + /// reported as unused + pub fn add_implicit_member_alias( + &mut self, + alias: Name, + ident: ModuleIdent, + member: Name, + ) -> Result<(), Loc> { + let result = self.remove_member_alias_(&alias); + self.members + .add(alias, ((ident, member), /* is_implicit */ true)) + .unwrap(); + result + } +} + +impl AliasMap { + pub fn new() -> Self { + Self { + modules: UniqueMap::new(), + members: UniqueMap::new(), + unused: vec![], + } + } + + pub fn is_empty(&self) -> bool { + let Self { + modules, + members, + unused: _, + } = self; + modules.is_empty() && members.is_empty() + } + + fn current_depth(&self) -> usize { + self.unused.len() + } + + pub fn module_alias_get(&mut self, n: &Name) -> Option { + match self.modules.get_mut(n) { + None => None, + Some((depth_opt, ident)) => { + if let Some(depth) = depth_opt { + self.unused[*depth].modules.remove(n); + } + *depth_opt = None; + // We are preserving the name's original location, rather than referring to where + // the alias was defined. The name represents JUST the module name, though, so we do + // not change location of the address as we don't have this information. + // TODO maybe we should also keep the alias reference (or its location)? 
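+                // Illustrative example: after `use 0x1::vector as V;`, resolving a later
+                // occurrence of `V` yields `0x1::vector`, with the module name's location
+                // pointing at that occurrence of `V` rather than at the `use` declaration.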
+ let sp!(_, ModuleIdent_ { + address, + module: ModuleName(sp!(_, module)) + }) = ident; + let address = *address; + let module = ModuleName(sp(n.loc, *module)); + Some(sp(n.loc, ModuleIdent_ { address, module })) + }, + } + } + + pub fn member_alias_get(&mut self, n: &Name) -> Option<(ModuleIdent, Name)> { + match self.members.get_mut(n) { + None => None, + Some((depth_opt, (sp!(mem_mod_loc, mem_mod), sp!(_, mem_name)))) => { + if let Some(depth) = depth_opt { + self.unused[*depth].members.remove(n); + } + *depth_opt = None; + // We are preserving the name's original location, rather than referring to where + // the alias was defined. The name represents JUST the member name, though, so we do + // not change location of the module as we don't have this information. + // TODO maybe we should also keep the alias reference (or its location)? + Some((sp(*mem_mod_loc, *mem_mod), sp(n.loc, *mem_name))) + }, + } + } + + /// Adds all of the new items in the new inner scope as shadowing the outer one. + /// Gives back the outer scope + pub fn add_and_shadow_all(&mut self, shadowing: AliasMapBuilder) -> OldAliasMap { + if shadowing.is_empty() { + return OldAliasMap(None); + } + + let outer_scope = OldAliasMap(Some(self.clone())); + let AliasMapBuilder { + modules: new_modules, + members: new_members, + } = shadowing; + + let next_depth = self.current_depth(); + let mut current_scope = AliasSet::new(); + for (alias, (ident, is_implicit)) in new_modules { + if !is_implicit { + current_scope.modules.add(alias).unwrap(); + } + self.modules.remove(&alias); + self.modules.add(alias, (Some(next_depth), ident)).unwrap(); + } + for (alias, (ident_member, is_implicit)) in new_members { + if !is_implicit { + current_scope.members.add(alias).unwrap(); + } + self.members.remove(&alias); + self.members + .add(alias, (Some(next_depth), ident_member)) + .unwrap(); + } + self.unused.push(current_scope); + outer_scope + } + + /// Similar to add_and_shadow but just removes aliases now shadowed by a type parameter + pub fn shadow_for_type_parameters<'a, I: IntoIterator>( + &mut self, + tparams: I, + ) -> OldAliasMap + where + I::IntoIter: ExactSizeIterator, + { + let tparams_iter = tparams.into_iter(); + if tparams_iter.len() == 0 { + return OldAliasMap(None); + } + + let outer_scope = OldAliasMap(Some(self.clone())); + self.unused.push(AliasSet::new()); + for tp_name in tparams_iter { + self.members.remove(tp_name); + } + outer_scope + } + + /// Resets the alias map and gives the set of aliases that were unused + pub fn set_to_outer_scope(&mut self, outer_scope: OldAliasMap) -> AliasSet { + let outer_scope = match outer_scope.0 { + None => return AliasSet::new(), + Some(outer) => outer, + }; + let mut inner_scope = std::mem::replace(self, outer_scope); + let outer_scope = self; + assert!(outer_scope.current_depth() + 1 == inner_scope.current_depth()); + let unused = inner_scope.unused.pop().unwrap(); + outer_scope.unused = inner_scope.unused; + unused + } +} + +impl OldAliasMap { + pub fn is_empty(&self) -> bool { + match &self.0 { + None => true, + Some(aliases) => aliases.is_empty(), + } + } +} diff --git a/third_party/move/move-compiler-v2/legacy-move-compiler/src/expansion/ast.rs b/third_party/move/move-compiler-v2/legacy-move-compiler/src/expansion/ast.rs new file mode 100644 index 0000000000000..34e84c50ce1c3 --- /dev/null +++ b/third_party/move/move-compiler-v2/legacy-move-compiler/src/expansion/ast.rs @@ -0,0 +1,1978 @@ +// Copyright (c) The Diem Core Contributors +// Copyright (c) The Move Contributors +// 
SPDX-License-Identifier: Apache-2.0 + +use crate::{ + expansion::translate::is_valid_struct_constant_or_schema_name, + parser::ast::{ + self as P, Ability, Ability_, BinOp, CallKind, ConstantName, Field, FunctionName, Label, + LambdaCaptureKind, ModuleName, QuantKind, SpecApplyPattern, StructName, UnaryOp, UseDecl, + Var, VariantName, ENTRY_MODIFIER, + }, + shared::{ + ast_debug::*, + known_attributes::{AttributeKind, KnownAttribute}, + unique_map::UniqueMap, + unique_set::UniqueSet, + *, + }, +}; +use move_ir_types::location::*; +use move_symbol_pool::Symbol; +use std::{ + collections::{BTreeMap, BTreeSet, VecDeque}, + fmt, + hash::Hash, +}; +//************************************************************************************************** +// Program +//************************************************************************************************** + +#[derive(Debug, Clone)] +pub struct Program { + // Map of declared named addresses, and their values if specified + pub modules: UniqueMap, + pub scripts: BTreeMap, +} + +//************************************************************************************************** +// Attributes +//************************************************************************************************** + +#[derive(Debug, Clone, PartialEq, Eq)] +#[allow(clippy::large_enum_variant)] +pub enum AttributeValue_ { + Value(Value), + Module(ModuleIdent), + ModuleAccess(ModuleAccess), +} +pub type AttributeValue = Spanned; + +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum Attribute_ { + Name(Name), + Assigned(Name, Box), + Parameterized(Name, Attributes), +} +pub type Attribute = Spanned; + +impl Attribute_ { + pub fn attribute_name(&self) -> &Name { + match self { + Attribute_::Name(nm) + | Attribute_::Assigned(nm, _) + | Attribute_::Parameterized(nm, _) => nm, + } + } +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)] +pub enum AttributeName_ { + Unknown(Symbol), + Known(KnownAttribute), +} +pub type AttributeName = Spanned; + +pub type Attributes = UniqueMap; + +//************************************************************************************************** +// Scripts +//************************************************************************************************** + +#[derive(Debug, Clone)] +pub struct Script { + // package name metadata from compiler arguments, not used for any language rules + pub package_name: Option, + pub attributes: Attributes, + pub loc: Loc, + pub immediate_neighbors: UniqueMap, + pub used_addresses: BTreeSet
+    pub constants: UniqueMap<ConstantName, Constant>,
+    pub function_name: FunctionName,
+    pub function: Function,
+    pub specs: Vec<SpecBlock>,
+    pub use_decls: Vec<UseDecl>,
+}
+
+//**************************************************************************************************
+// Modules
+//**************************************************************************************************
+
+#[derive(Clone, Copy)]
+pub enum Address {
+    Numerical(Option<Name>, Spanned<NumericalAddress>),
+    NamedUnassigned(Name),
+}
+#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
+pub struct ModuleIdent_ {
+    pub address: Address,
+    pub module: ModuleName,
+}
+pub type ModuleIdent = Spanned<ModuleIdent_>;
+
+#[derive(Debug, Clone)]
+pub struct ModuleDefinition {
+    // package name metadata from compiler arguments, not used for any language rules
+    pub package_name: Option<Symbol>,
+    pub attributes: Attributes,
+    pub loc: Loc,
+    pub is_source_module: bool,
+    /// `dependency_order` is the topological order/rank in the dependency graph.
+    /// `dependency_order` is initialized at `0` and set in the uses pass
+    pub dependency_order: usize,
+    pub immediate_neighbors: UniqueMap<ModuleIdent, Neighbor>,
+    pub used_addresses: BTreeSet<Address>
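// Editor's note: an illustrative, self-contained sketch (not part of the patch)
// of the distinction `Address` draws above: a `Numerical` address may carry the
// name it was assigned from plus its resolved numeric value, while a
// `NamedUnassigned` address is still only a name. The `Mini*` types and
// `render_module_ident` are stand-ins, not the compiler's definitions.
#[derive(Debug, Clone)]
enum MiniAddress {
    Numerical(Option<String>, u128), // (optional source name, resolved value)
    NamedUnassigned(String),         // declared name with no assigned value yet
}

// Render a module identity the way diagnostics usually show it, e.g.
// `0x1::coin` once resolved, or `std::coin` while the address is still symbolic.
fn render_module_ident(address: &MiniAddress, module: &str) -> String {
    match address {
        MiniAddress::Numerical(_, value) => format!("0x{:x}::{}", value, module),
        MiniAddress::NamedUnassigned(name) => format!("{}::{}", name, module),
    }
}

fn main() {
    let resolved = MiniAddress::Numerical(Some("std".to_string()), 0x1);
    let symbolic = MiniAddress::NamedUnassigned("std".to_string());
    assert_eq!(render_module_ident(&resolved, "coin"), "0x1::coin");
    assert_eq!(render_module_ident(&symbolic, "coin"), "std::coin");
}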
, + pub friends: UniqueMap, + pub structs: UniqueMap, + pub functions: UniqueMap, + pub constants: UniqueMap, + pub specs: Vec, + pub use_decls: Vec, +} + +//************************************************************************************************** +// Friend +//************************************************************************************************** + +#[derive(Debug, Clone)] +pub struct Friend { + pub attributes: Attributes, + pub loc: Loc, +} + +#[derive(Debug, Clone, Eq, PartialEq, Ord, PartialOrd)] +pub enum Neighbor { + Dependency, + Friend, +} + +//************************************************************************************************** +// Structs +//************************************************************************************************** + +pub type Fields = UniqueMap; + +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct StructTypeParameter { + pub is_phantom: bool, + pub name: Name, + pub constraints: AbilitySet, +} + +#[derive(Debug, Clone, PartialEq)] +pub struct StructDefinition { + pub attributes: Attributes, + pub loc: Loc, + pub abilities: AbilitySet, + pub type_parameters: Vec, + pub layout: StructLayout, +} + +#[derive(Debug, Clone, PartialEq)] +pub enum StructLayout { + // the second field is true iff the struct has positional fields + Singleton(Fields, bool), + Variants(Vec), + Native(Loc), +} + +#[derive(Debug, PartialEq, Clone)] +pub struct StructVariant { + pub attributes: Attributes, + pub loc: Loc, + pub name: VariantName, + pub fields: Fields, + pub is_positional: bool, +} + +//************************************************************************************************** +// Functions +//************************************************************************************************** + +#[derive(PartialEq, Eq, Debug, Clone)] +pub enum Visibility { + Public(Loc), + Friend(Loc), + Package(Loc), + Internal, +} + +#[derive(PartialEq, Clone, Debug)] +pub struct FunctionSignature { + pub type_parameters: Vec<(Name, AbilitySet)>, + pub parameters: Vec<(Var, Type)>, + pub return_type: Type, +} + +#[derive(PartialEq, Clone, Debug)] +pub enum FunctionBody_ { + Defined(Sequence), + Native, +} +pub type FunctionBody = Spanned; + +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)] +pub struct SpecId(usize); + +#[derive(PartialEq, Clone, Debug)] +pub struct Function { + pub attributes: Attributes, + pub loc: Loc, + pub inline: bool, + pub visibility: Visibility, + pub entry: Option, + pub signature: FunctionSignature, + pub acquires: Vec, + // Only v2 compiler + pub access_specifiers: Option>, + pub body: FunctionBody, + pub specs: BTreeMap, +} + +#[derive(PartialEq, Clone, Debug)] +pub struct AccessSpecifier_ { + pub kind: AccessSpecifierKind, + pub negated: bool, + pub module_address: Option
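// Editor's note: an illustrative, self-contained sketch (not part of the patch)
// of how a consumer might walk the three `StructLayout` shapes declared above:
// `Singleton` for plain structs (the bool marks positional fields), `Variants`
// for enum-like structs, and `Native` for layouts defined outside Move source.
// The `Mini*` types are simplified stand-ins, not the compiler's definitions.
#[derive(Debug)]
enum MiniLayout {
    Singleton(Vec<String>, bool),          // (field names, has_positional_fields)
    Variants(Vec<(String, Vec<String>)>),  // (variant name, its fields)
    Native,
}

// Count every field a layout declares, across all variants if there are any.
fn field_count(layout: &MiniLayout) -> usize {
    match layout {
        MiniLayout::Singleton(fields, _is_positional) => fields.len(),
        MiniLayout::Variants(variants) => variants.iter().map(|(_, fs)| fs.len()).sum(),
        MiniLayout::Native => 0,
    }
}

fn main() {
    let plain = MiniLayout::Singleton(vec!["value".into()], false);
    let option_like = MiniLayout::Variants(vec![
        ("None".into(), vec![]),
        ("Some".into(), vec!["0".into()]), // positional field, named by its index
    ]);
    assert_eq!(field_count(&plain), 1);
    assert_eq!(field_count(&option_like), 1);
    assert_eq!(field_count(&MiniLayout::Native), 0);
}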
, + pub module_name: Option, + pub resource_name: Option, + pub type_args: Option>, + pub address: AddressSpecifier, +} + +#[derive(PartialEq, Clone, Debug)] +pub enum AccessSpecifierKind { + Reads, + Writes, + LegacyAcquires, +} + +pub type AccessSpecifier = Spanned; + +#[derive(PartialEq, Clone, Debug)] +pub enum AddressSpecifier_ { + Any, + Empty, + Literal(NumericalAddress), + Name(Name), + Call(ModuleAccess, Option>, Name), +} + +pub type AddressSpecifier = Spanned; + +//************************************************************************************************** +// Constants +//************************************************************************************************** + +#[derive(PartialEq, Clone, Debug)] +pub struct Constant { + pub attributes: Attributes, + pub loc: Loc, + pub signature: Type, + pub value: Exp, +} + +//************************************************************************************************** +// Specification Blocks +//************************************************************************************************** + +#[derive(Debug, Clone, PartialEq)] +pub struct SpecBlock_ { + pub attributes: Attributes, + pub target: SpecBlockTarget, + pub members: Vec, +} +pub type SpecBlock = Spanned; + +#[derive(Debug, Clone, PartialEq)] +pub enum SpecBlockTarget_ { + Code, + Module, + Member(Name, Option>), + Schema(Name, Vec<(Name, AbilitySet)>), +} + +pub type SpecBlockTarget = Spanned; + +#[derive(Debug, Clone, PartialEq)] +#[allow(clippy::large_enum_variant)] +pub enum SpecBlockMember_ { + Condition { + kind: SpecConditionKind, + properties: Vec, + exp: Exp, + additional_exps: Vec, + }, + Function { + uninterpreted: bool, + name: FunctionName, + signature: FunctionSignature, + body: FunctionBody, + }, + Variable { + is_global: bool, + name: Name, + type_parameters: Vec<(Name, AbilitySet)>, + type_: Type, + init: Option, + }, + Update { + lhs: Exp, + rhs: Exp, + }, + Let { + name: Name, + post_state: bool, + def: Exp, + }, + Include { + properties: Vec, + exp: Exp, + }, + Apply { + exp: Exp, + patterns: Vec, + exclusion_patterns: Vec, + }, + Pragma { + properties: Vec, + }, +} +pub type SpecBlockMember = Spanned; + +#[derive(PartialEq, Eq, Clone, Debug)] +pub enum SpecConditionKind_ { + Assert, + Assume, + Decreases, + AbortsIf, + AbortsWith, + SucceedsIf, + Modifies, + Emits, + Ensures, + Requires, + Invariant(Vec<(Name, AbilitySet)>), + InvariantUpdate(Vec<(Name, AbilitySet)>), + Axiom(Vec<(Name, AbilitySet)>), +} +pub type SpecConditionKind = Spanned; + +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct PragmaProperty_ { + pub name: Name, + pub value: Option, +} +pub type PragmaProperty = Spanned; + +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum PragmaValue { + Literal(Value), + Ident(ModuleAccess), +} + +//************************************************************************************************** +// Types +//************************************************************************************************** + +#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub struct AbilitySet(UniqueSet); + +#[derive(Debug, Clone, PartialEq, Eq)] +#[allow(clippy::large_enum_variant)] +pub enum ModuleAccess_ { + Name(Name), + // ModuleAccess(module_ident, member_ident, optional_variant_ident) + ModuleAccess(ModuleIdent, Name, Option), +} + +impl ModuleAccess_ { + pub fn get_name(&self) -> &Name { + match self { + ModuleAccess_::Name(n) | ModuleAccess_::ModuleAccess(_, n, _) => n, + } + } + + pub fn 
is_valid_struct_constant_or_schema_name(&self) -> bool { + is_valid_struct_constant_or_schema_name(self.get_name().value.as_str()) + } +} + +pub type ModuleAccess = Spanned; + +#[derive(Debug, Clone, PartialEq)] +#[allow(clippy::large_enum_variant)] +pub enum Type_ { + Unit, + Multiple(Vec), + Apply(ModuleAccess, Vec), + Ref(bool, Box), + Fun(Vec, Box, AbilitySet), + UnresolvedError, +} +pub type Type = Spanned; + +//************************************************************************************************** +// Expressions +//************************************************************************************************** + +#[derive(Debug, Clone, PartialEq)] +pub enum LValue_ { + Var(ModuleAccess, Option>), + Unpack( + ModuleAccess, + Option>, + Fields, + Option, + ), + PositionalUnpack(ModuleAccess, Option>, LValueOrDotDotList), +} +pub type LValue = Spanned; +pub type LValueList_ = Vec; +pub type LValueList = Spanned; + +/// These represent LValues with user-specified explicit types. +#[derive(Debug, Clone, PartialEq)] +pub struct TypedLValue_(pub LValue, pub Option); +pub type TypedLValue = Spanned; +pub type TypedLValueList_ = Vec; +pub type TypedLValueList = Spanned; + +pub fn wild_card(loc: Loc) -> LValue { + let wildcard = sp(loc, Symbol::from("_")); + let lvalue_ = LValue_::Var(sp(loc, ModuleAccess_::Name(wildcard)), None); + sp(loc, lvalue_) +} + +#[derive(Debug, Clone, PartialEq)] +pub struct DotDot_; +pub type DotDot = Spanned; + +#[derive(Debug, Clone, PartialEq)] +pub enum LValueOrDotDot_ { + LValue(LValue), + DotDot, +} +pub type LValueOrDotDot = Spanned; +pub type LValueOrDotDotList_ = Vec; +pub type LValueOrDotDotList = Spanned; + +pub type LValueWithRange_ = (LValue, Exp); +pub type LValueWithRange = Spanned; +pub type LValueWithRangeList_ = Vec; +pub type LValueWithRangeList = Spanned; + +#[derive(Debug, Clone, PartialEq)] +#[allow(clippy::large_enum_variant)] +pub enum ExpDotted_ { + Exp(Exp), + Dot(Box, Name), +} +pub type ExpDotted = Spanned; + +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum Value_ { + // 0x + Address(Address), + // + InferredNum(move_core_types::u256::U256), + // u8 + U8(u8), + // u16 + U16(u16), + // u32 + U32(u32), + // u64 + U64(u64), + // u128 + U128(u128), + // u256 + U256(move_core_types::u256::U256), + // true + // false + Bool(bool), + Bytearray(Vec), +} +pub type Value = Spanned; + +#[derive(Debug, Clone, PartialEq)] +#[allow(clippy::large_enum_variant)] +pub enum Exp_ { + Value(Value), + Move(Var), + Copy(Var), + + Name(ModuleAccess, Option>), + Call(ModuleAccess, CallKind, Option>, Spanned>), + ExpCall(Box, Spanned>), + Pack(ModuleAccess, Option>, Fields), + Vector(Loc, Option>, Spanned>), + + IfElse(Box, Box, Box), + Match(Box, Vec, Exp)>>), + While(Option
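// Editor's note: an illustrative sketch (not part of the patch) of how a type
// such as `&mut vector<u64>` decomposes into the `Type_` shapes declared above:
// `Apply` for named type applications, `Ref(is_mut, inner)` for references, and
// `Multiple` / `Unit` for tuples. `MiniType` covers only a subset of the real
// enum (no `Fun` or `UnresolvedError`) and is a stand-in, not the compiler's type.
#[derive(Debug)]
enum MiniType {
    Unit,
    Apply(String, Vec<MiniType>), // type name plus its type arguments
    Ref(bool, Box<MiniType>),     // is_mut plus the referred-to type
    Multiple(Vec<MiniType>),      // tuple of types
}

// Pretty-print a type back to (roughly) Move surface syntax.
fn render(t: &MiniType) -> String {
    match t {
        MiniType::Unit => "()".to_string(),
        MiniType::Apply(name, args) if args.is_empty() => name.clone(),
        MiniType::Apply(name, args) => {
            let args: Vec<_> = args.iter().map(render).collect();
            format!("{}<{}>", name, args.join(", "))
        },
        MiniType::Ref(true, inner) => format!("&mut {}", render(inner)),
        MiniType::Ref(false, inner) => format!("&{}", render(inner)),
        MiniType::Multiple(parts) => {
            let parts: Vec<_> = parts.iter().map(render).collect();
            format!("({})", parts.join(", "))
        },
    }
}

fn main() {
    // &mut vector<u64>
    let ty = MiniType::Ref(
        true,
        Box::new(MiniType::Apply(
            "vector".to_string(),
            vec![MiniType::Apply("u64".to_string(), vec![])],
        )),
    );
    assert_eq!(render(&ty), "&mut vector<u64>");
}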